| repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values) |
|---|---|---|---|---|---|
jkugler/ansible | lib/ansible/playbook/__init__.py | 108 | 3720 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import get_all_plugin_loaders
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Playbook']
class Playbook:
def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = os.getcwd()
self._loader = loader
@staticmethod
def load(file_name, variable_manager=None, loader=None):
pb = Playbook(loader=loader)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
def _load_playbook_data(self, file_name, variable_manager):
if os.path.isabs(file_name):
self._basedir = os.path.dirname(file_name)
else:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
self._loader.set_basedir(self._basedir)
# dynamically load any plugins from the playbook directory
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join(self._basedir, obj.subdir)
if os.path.isdir(plugin_path):
obj.add_directory(plugin_path)
ds = self._loader.load_from_file(os.path.basename(file_name))
if not isinstance(ds, list):
raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
# Parse the playbook entries. For plays, we simply parse them
# using the Play() object, and includes are parsed using the
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
if 'include' in entry:
pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
if pb is not None:
self._entries.extend(pb._entries)
else:
display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan')
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
self._entries.append(entry_obj)
def get_loader(self):
return self._loader
def get_plays(self):
return self._entries[:]
| gpl-3.0 |
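A minimal usage sketch for the `Playbook` class in the Ansible snippet above; the `DataLoader` import path mirrors the snippet's own imports, while the `site.yml` file name and the bare `DataLoader()` construction are assumptions, not a verified end-to-end Ansible invocation.

```python
# Hypothetical driver for the Playbook class shown above.
# Assumptions: DataLoader is importable as in the snippet's own imports,
# and 'site.yml' is an existing playbook in the current directory.
from ansible.parsing import DataLoader
from ansible.playbook import Playbook

loader = DataLoader()                 # loads and parses YAML from disk
pb = Playbook.load("site.yml", variable_manager=None, loader=loader)

# get_plays() returns a copy of the parsed Play entries
for play in pb.get_plays():
    print(play)
```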
dya2/python-for-android | python3-alpha/python3-src/Lib/http/cookies.py | 47 | 20357 | #!/usr/bin/env python3
#
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' (backslash).
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if any(c not in LegalChars for c in key):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s=%s>' % (self.__class__.__name__,
self.key, repr(self.value))
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key == "secure":
append(str(self._reserved[key]))
elif key == "httponly":
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
\s*;? # Probably ending in a semi-colon
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
M[key] = _unquote(value)
else:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
| apache-2.0 |
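A short standard-library sketch of the octal quoting behaviour documented in the comments of the `http.cookies` snippet above: characters outside `_LegalChars` are escaped as `\NNN` octal sequences by `_quote()` on output and decoded again by `_unquote()` on load. The cookie name and value are arbitrary examples.

```python
# Round-trip a value containing ';' (not a legal char) through SimpleCookie.
from http import cookies

C = cookies.SimpleCookie()
C["flavor"] = "chocolate; chip"
header = C.output()
print(header)                     # Set-Cookie: flavor="chocolate\073 chip"

D = cookies.SimpleCookie()
D.load(header.split(": ", 1)[1])  # feed the value part back in
print(D["flavor"].value)          # chocolate; chip
```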
krikru/tensorflow-opencl | tensorflow/python/ops/linalg_grad.py | 61 | 8371 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in linalg_ops.py.
Useful reference for derivative formulas is
An extended collection of matrix derivative results for forward and reverse
mode algorithmic differentiation by Mike Giles:
http://eprints.maths.ox.ac.uk/1079/1/NA-08-01.pdf
A detailed derivation of formulas for backpropagating through spectral layers
(SVD and Eig) by Ionescu, Vantzos & Sminchisescu:
https://arxiv.org/pdf/1509.07838v4.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("MatrixInverse")
def _MatrixInverseGrad(op, grad):
"""Gradient for MatrixInverse."""
ainv = op.outputs[0]
return -math_ops.matmul(
ainv, math_ops.matmul(
grad, ainv, adjoint_b=True), adjoint_a=True)
@ops.RegisterGradient("MatrixDeterminant")
def _MatrixDeterminantGrad(op, grad):
"""Gradient for MatrixDeterminant."""
a = op.inputs[0]
c = op.outputs[0]
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
multipliers = array_ops.reshape(
grad * c, array_ops.concat([array_ops.shape(c), [1, 1]], 0))
return multipliers * a_adj_inv
@ops.RegisterGradient("Cholesky")
def _CholeskyGrad(op, grad):
"""Gradient for Cholesky."""
return linalg_ops.cholesky_grad(op.outputs[0], grad)
@ops.RegisterGradient("MatrixSolve")
def _MatrixSolveGrad(op, grad):
"""Gradient for MatrixSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
c = op.outputs[0]
grad_b = linalg_ops.matrix_solve(a, grad, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
return (grad_a, grad_b)
@ops.RegisterGradient("MatrixSolveLs")
def _MatrixSolveLsGrad(op, grad):
"""Gradients for MatrixSolveLs."""
# TODO(rmlarsen): The implementation could be more efficient:
# a) Output the Cholesky factorization from forward op instead of
# recomputing it here.
# b) Implement a symmetric rank-k update op instead of computing
# x*z + transpose(x*z). This pattern occurs other places in TensorFlow.
def _overdetermined(op, grad):
"""Gradients for the overdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the first
kind:
X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
which solve the least squares problem
min ||A * X - B||_F^2 + lambda ||X||_F^2.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
x = op.outputs[0]
a_shape = array_ops.shape(a)
batch_shape = a_shape[:-2]
n = a_shape[-1]
identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
gramian = math_ops.matmul(a, a, adjoint_a=True) + l2_regularizer * identity
chol = linalg_ops.cholesky(gramian)
# Temporary z = (A^T * A + lambda * I)^{-1} * grad.
z = linalg_ops.cholesky_solve(chol, grad)
xzt = math_ops.matmul(x, z, adjoint_b=True)
zx_sym = xzt + array_ops.matrix_transpose(xzt)
grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
grad_b = math_ops.matmul(a, z)
return (grad_a, grad_b, None)
def _underdetermined(op, grad):
"""Gradients for the underdetermined case of MatrixSolveLs.
This is the backprop for the solution to the normal equations of the second
kind:
X = F(A, B) = A * (A*A^T + lambda*I)^{-1} * B
that (for lambda=0) solve the least squares problem
min ||X||_F subject to A*X = B.
"""
a = op.inputs[0]
b = op.inputs[1]
l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
a_shape = array_ops.shape(a)
batch_shape = a_shape[:-2]
m = a_shape[-2]
identity = linalg_ops.eye(m, batch_shape=batch_shape, dtype=a.dtype)
gramian = math_ops.matmul(a, a, adjoint_b=True) + l2_regularizer * identity
chol = linalg_ops.cholesky(gramian)
grad_b = linalg_ops.cholesky_solve(chol, math_ops.matmul(a, grad))
# Temporary tmp = (A * A^T + lambda * I)^{-1} * B.
tmp = linalg_ops.cholesky_solve(chol, b)
a1 = math_ops.matmul(tmp, a, adjoint_a=True)
a1 = -math_ops.matmul(grad_b, a1)
a2 = grad - math_ops.matmul(a, grad_b, adjoint_a=True)
a2 = math_ops.matmul(tmp, a2, adjoint_b=True)
grad_a = a1 + a2
return (grad_a, grad_b, None)
fast = op.get_attr("fast")
if fast is False:
raise ValueError("Gradient not defined for fast=False")
matrix_shape = op.inputs[0].get_shape()[-2:]
if matrix_shape.is_fully_defined():
if matrix_shape[-2] >= matrix_shape[-1]:
return _overdetermined(op, grad)
else:
return _underdetermined(op, grad)
else:
# We have to defer determining the shape to runtime and use
# conditional execution of the appropriate graph.
matrix_shape = array_ops.shape(op.inputs[0])[-2:]
return control_flow_ops.cond(matrix_shape[-2] >= matrix_shape[-1],
lambda: _overdetermined(op, grad),
lambda: _underdetermined(op, grad))
@ops.RegisterGradient("MatrixTriangularSolve")
def _MatrixTriangularSolveGrad(op, grad):
"""Gradient for MatrixTriangularSolve."""
a = op.inputs[0]
adjoint_a = op.get_attr("adjoint")
lower_a = op.get_attr("lower")
c = op.outputs[0]
grad_b = linalg_ops.matrix_triangular_solve(
a, grad, lower=lower_a, adjoint=not adjoint_a)
if adjoint_a:
grad_a = -math_ops.matmul(c, grad_b, adjoint_b=True)
else:
grad_a = -math_ops.matmul(grad_b, c, adjoint_b=True)
if lower_a:
grad_a = array_ops.matrix_band_part(grad_a, -1, 0)
else:
grad_a = array_ops.matrix_band_part(grad_a, 0, -1)
return (grad_a, grad_b)
@ops.RegisterGradient("SelfAdjointEigV2")
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
"""Gradient for SelfAdjointEigV2."""
e = op.outputs[0]
v = op.outputs[1]
# a = op.inputs[0], which satisfies
# a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
with ops.control_dependencies([grad_e.op, grad_v.op]):
if grad_v is not None:
# Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
# Notice that because of the term involving f, the gradient becomes
# infinite (or NaN in practice) when eigenvalues are not unique.
# Mathematically this should not be surprising, since for (k-fold)
# degenerate eigenvalues, the corresponding eigenvectors are only defined
# up to arbitrary rotation in a (k-dimensional) subspace.
f = array_ops.matrix_set_diag(
math_ops.reciprocal(
array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
array_ops.zeros_like(e))
grad_a = math_ops.matmul(
v,
math_ops.matmul(
array_ops.matrix_diag(grad_e) + f * math_ops.matmul(
v, grad_v, adjoint_a=True),
v,
adjoint_b=True))
else:
grad_a = math_ops.matmul(
v, math_ops.matmul(
array_ops.matrix_diag(grad_e), v, adjoint_b=True))
# The forward op only depends on the lower triangular part of a, so here we
# symmetrize and take the lower triangle
grad_a = array_ops.matrix_band_part(
grad_a + array_ops.matrix_transpose(grad_a), -1, 0)
grad_a = array_ops.matrix_set_diag(grad_a,
0.5 * array_ops.matrix_diag_part(grad_a))
return grad_a
| apache-2.0 |
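The `MatrixInverse` gradient above relies on the identity d(A^{-1}) = -A^{-1} dA A^{-1}. The sketch below checks that identity numerically with plain NumPy central differences, deliberately avoiding any assumption about a particular TensorFlow version or API.

```python
# Numerical check of the identity used by _MatrixInverseGrad.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4)) + 4.0 * np.eye(4)   # keep A well-conditioned
dA = rng.standard_normal((4, 4))
eps = 1e-6

analytic = -np.linalg.inv(A) @ dA @ np.linalg.inv(A)
numeric = (np.linalg.inv(A + eps * dA) - np.linalg.inv(A - eps * dA)) / (2 * eps)

print(np.max(np.abs(analytic - numeric)))   # should be tiny (~1e-9)
```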
ohmini/thaifoodapi | lib/django/contrib/gis/geos/collections.py | 292 | 4986 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
import json
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.libgeos import get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initializes a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if not args:
raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super(GeometryCollection, self).__init__(collection, **kwargs)
def __iter__(self):
"Iterates over each Geometry in the Collection."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of geometries in this Collection."
return self.num_geom
# ### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Returns the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def json(self):
if self.__class__.__name__ == 'GeometryCollection':
return json.dumps({
'type': self.__class__.__name__,
'geometries': [
{'type': geom.__class__.__name__, 'coordinates': geom.coords}
for geom in self
],
})
return super(GeometryCollection, self).json
geojson = json
@property
def kml(self):
"Returns the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
@property
def tuple(self):
"Returns a tuple of all the coordinates in this Geometry Collection"
return tuple(g.tuple for g in self)
coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(ProjectInterpolateMixin, GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def merged(self):
"""
Returns a LineString representing the line merge of this
MultiLineString.
"""
return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
@property
def cascaded_union(self):
"Returns a cascaded union of this MultiPolygon."
return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| bsd-3-clause |
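A small sketch exercising the collection classes from the GeoDjango snippet above; it assumes Django with the GEOS C library installed, and the specific geometries are arbitrary examples.

```python
# Build a GeometryCollection and read the serialisations defined above.
from django.contrib.gis.geos import GeometryCollection, LineString, Point

gc = GeometryCollection(Point(0, 0), LineString((0, 0), (1, 1)))

print(len(gc))       # 2 -- __len__ returns num_geom
print(gc[0].tuple)   # (0.0, 0.0) -- indexing clones the inner geometry
print(gc.json)       # GeoJSON with a 'geometries' list, per the json property
print(gc.kml)        # <MultiGeometry>...</MultiGeometry>
```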
jetskijoe/headphones | lib/unidecode/x091.py | 252 | 4655 | data = (
'Ruo ', # 0x00
'Bei ', # 0x01
'E ', # 0x02
'Yu ', # 0x03
'Juan ', # 0x04
'Yu ', # 0x05
'Yun ', # 0x06
'Hou ', # 0x07
'Kui ', # 0x08
'Xiang ', # 0x09
'Xiang ', # 0x0a
'Sou ', # 0x0b
'Tang ', # 0x0c
'Ming ', # 0x0d
'Xi ', # 0x0e
'Ru ', # 0x0f
'Chu ', # 0x10
'Zi ', # 0x11
'Zou ', # 0x12
'Ju ', # 0x13
'Wu ', # 0x14
'Xiang ', # 0x15
'Yun ', # 0x16
'Hao ', # 0x17
'Yong ', # 0x18
'Bi ', # 0x19
'Mo ', # 0x1a
'Chao ', # 0x1b
'Fu ', # 0x1c
'Liao ', # 0x1d
'Yin ', # 0x1e
'Zhuan ', # 0x1f
'Hu ', # 0x20
'Qiao ', # 0x21
'Yan ', # 0x22
'Zhang ', # 0x23
'Fan ', # 0x24
'Qiao ', # 0x25
'Xu ', # 0x26
'Deng ', # 0x27
'Bi ', # 0x28
'Xin ', # 0x29
'Bi ', # 0x2a
'Ceng ', # 0x2b
'Wei ', # 0x2c
'Zheng ', # 0x2d
'Mao ', # 0x2e
'Shan ', # 0x2f
'Lin ', # 0x30
'Po ', # 0x31
'Dan ', # 0x32
'Meng ', # 0x33
'Ye ', # 0x34
'Cao ', # 0x35
'Kuai ', # 0x36
'Feng ', # 0x37
'Meng ', # 0x38
'Zou ', # 0x39
'Kuang ', # 0x3a
'Lian ', # 0x3b
'Zan ', # 0x3c
'Chan ', # 0x3d
'You ', # 0x3e
'Qi ', # 0x3f
'Yan ', # 0x40
'Chan ', # 0x41
'Zan ', # 0x42
'Ling ', # 0x43
'Huan ', # 0x44
'Xi ', # 0x45
'Feng ', # 0x46
'Zan ', # 0x47
'Li ', # 0x48
'You ', # 0x49
'Ding ', # 0x4a
'Qiu ', # 0x4b
'Zhuo ', # 0x4c
'Pei ', # 0x4d
'Zhou ', # 0x4e
'Yi ', # 0x4f
'Hang ', # 0x50
'Yu ', # 0x51
'Jiu ', # 0x52
'Yan ', # 0x53
'Zui ', # 0x54
'Mao ', # 0x55
'Dan ', # 0x56
'Xu ', # 0x57
'Tou ', # 0x58
'Zhen ', # 0x59
'Fen ', # 0x5a
'Sakenomoto ', # 0x5b
'[?] ', # 0x5c
'Yun ', # 0x5d
'Tai ', # 0x5e
'Tian ', # 0x5f
'Qia ', # 0x60
'Tuo ', # 0x61
'Zuo ', # 0x62
'Han ', # 0x63
'Gu ', # 0x64
'Su ', # 0x65
'Po ', # 0x66
'Chou ', # 0x67
'Zai ', # 0x68
'Ming ', # 0x69
'Luo ', # 0x6a
'Chuo ', # 0x6b
'Chou ', # 0x6c
'You ', # 0x6d
'Tong ', # 0x6e
'Zhi ', # 0x6f
'Xian ', # 0x70
'Jiang ', # 0x71
'Cheng ', # 0x72
'Yin ', # 0x73
'Tu ', # 0x74
'Xiao ', # 0x75
'Mei ', # 0x76
'Ku ', # 0x77
'Suan ', # 0x78
'Lei ', # 0x79
'Pu ', # 0x7a
'Zui ', # 0x7b
'Hai ', # 0x7c
'Yan ', # 0x7d
'Xi ', # 0x7e
'Niang ', # 0x7f
'Wei ', # 0x80
'Lu ', # 0x81
'Lan ', # 0x82
'Yan ', # 0x83
'Tao ', # 0x84
'Pei ', # 0x85
'Zhan ', # 0x86
'Chun ', # 0x87
'Tan ', # 0x88
'Zui ', # 0x89
'Chuo ', # 0x8a
'Cu ', # 0x8b
'Kun ', # 0x8c
'Ti ', # 0x8d
'Mian ', # 0x8e
'Du ', # 0x8f
'Hu ', # 0x90
'Xu ', # 0x91
'Xing ', # 0x92
'Tan ', # 0x93
'Jiu ', # 0x94
'Chun ', # 0x95
'Yun ', # 0x96
'Po ', # 0x97
'Ke ', # 0x98
'Sou ', # 0x99
'Mi ', # 0x9a
'Quan ', # 0x9b
'Chou ', # 0x9c
'Cuo ', # 0x9d
'Yun ', # 0x9e
'Yong ', # 0x9f
'Ang ', # 0xa0
'Zha ', # 0xa1
'Hai ', # 0xa2
'Tang ', # 0xa3
'Jiang ', # 0xa4
'Piao ', # 0xa5
'Shan ', # 0xa6
'Yu ', # 0xa7
'Li ', # 0xa8
'Zao ', # 0xa9
'Lao ', # 0xaa
'Yi ', # 0xab
'Jiang ', # 0xac
'Pu ', # 0xad
'Jiao ', # 0xae
'Xi ', # 0xaf
'Tan ', # 0xb0
'Po ', # 0xb1
'Nong ', # 0xb2
'Yi ', # 0xb3
'Li ', # 0xb4
'Ju ', # 0xb5
'Jiao ', # 0xb6
'Yi ', # 0xb7
'Niang ', # 0xb8
'Ru ', # 0xb9
'Xun ', # 0xba
'Chou ', # 0xbb
'Yan ', # 0xbc
'Ling ', # 0xbd
'Mi ', # 0xbe
'Mi ', # 0xbf
'Niang ', # 0xc0
'Xin ', # 0xc1
'Jiao ', # 0xc2
'Xi ', # 0xc3
'Mi ', # 0xc4
'Yan ', # 0xc5
'Bian ', # 0xc6
'Cai ', # 0xc7
'Shi ', # 0xc8
'You ', # 0xc9
'Shi ', # 0xca
'Shi ', # 0xcb
'Li ', # 0xcc
'Zhong ', # 0xcd
'Ye ', # 0xce
'Liang ', # 0xcf
'Li ', # 0xd0
'Jin ', # 0xd1
'Jin ', # 0xd2
'Qiu ', # 0xd3
'Yi ', # 0xd4
'Diao ', # 0xd5
'Dao ', # 0xd6
'Zhao ', # 0xd7
'Ding ', # 0xd8
'Po ', # 0xd9
'Qiu ', # 0xda
'He ', # 0xdb
'Fu ', # 0xdc
'Zhen ', # 0xdd
'Zhi ', # 0xde
'Ba ', # 0xdf
'Luan ', # 0xe0
'Fu ', # 0xe1
'Nai ', # 0xe2
'Diao ', # 0xe3
'Shan ', # 0xe4
'Qiao ', # 0xe5
'Kou ', # 0xe6
'Chuan ', # 0xe7
'Zi ', # 0xe8
'Fan ', # 0xe9
'Yu ', # 0xea
'Hua ', # 0xeb
'Han ', # 0xec
'Gong ', # 0xed
'Qi ', # 0xee
'Mang ', # 0xef
'Ri ', # 0xf0
'Di ', # 0xf1
'Si ', # 0xf2
'Xi ', # 0xf3
'Yi ', # 0xf4
'Chai ', # 0xf5
'Shi ', # 0xf6
'Tu ', # 0xf7
'Xi ', # 0xf8
'Nu ', # 0xf9
'Qian ', # 0xfa
'Ishiyumi ', # 0xfb
'Jian ', # 0xfc
'Pi ', # 0xfd
'Ye ', # 0xfe
'Yin ', # 0xff
)
| gpl-3.0 |
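A tiny illustration of how a table like the one above is consumed by the `unidecode` package: the high byte of the codepoint selects the module (`x091.py` covers U+9100–U+91FF) and the low byte indexes the tuple. The character chosen below ('酒', U+9152, offset 0x52, mapped to 'Jiu ') is an example read off this table; treat the exact output as indicative rather than guaranteed across package versions.

```python
from unidecode import unidecode

print(hex(ord('酒')))    # 0x9152 -> handled by x091.py at offset 0x52
print(unidecode('酒'))   # 'Jiu ' (the table keeps a trailing space)
```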
liu602348184/django | tests/staticfiles_tests/settings.py | 147 | 1039 | from __future__ import unicode_literals
import os.path
from django.utils._os import upath
TEST_ROOT = os.path.dirname(upath(__file__))
TESTFILES_PATH = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')
TEST_SETTINGS = {
'DEBUG': True,
'MEDIA_URL': '/media/',
'STATIC_URL': '/static/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
'STATICFILES_DIRS': [
os.path.join(TEST_ROOT, 'project', 'documents'),
('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
],
'STATICFILES_FINDERS': [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
],
'INSTALLED_APPS': [
'django.contrib.staticfiles',
'staticfiles_tests',
'staticfiles_tests.apps.test',
'staticfiles_tests.apps.no_label',
],
}
| bsd-3-clause |
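One common way a settings dictionary like `TEST_SETTINGS` above is applied is through Django's `override_settings`; the test class below is a hypothetical sketch, not part of the test suite itself.

```python
# Hypothetical use of TEST_SETTINGS via Django's standard override_settings.
from django.test import SimpleTestCase, override_settings

from staticfiles_tests.settings import TEST_SETTINGS


@override_settings(**TEST_SETTINGS)
class StaticSettingsSketch(SimpleTestCase):
    def test_static_url_is_applied(self):
        from django.conf import settings
        self.assertEqual(settings.STATIC_URL, '/static/')
```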
gpoulin/luigi | test/contrib/sqla_test.py | 5 | 12958 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Gouthaman Balaraman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""
This file implements unit test cases for luigi/contrib/sqla.py
Author: Gouthaman Balaraman
Date: 01/02/2015
"""
import os
import shutil
import tempfile
import unittest
from luigi import six
import luigi
import sqlalchemy
from luigi.contrib import sqla
from luigi.mock import MockFile
from nose.plugins.attrib import attr
if six.PY3:
unicode = str
#################################
# Globals part of the test case #
#################################
TEMPDIR = tempfile.mkdtemp()
SQLITEPATH = os.path.join(TEMPDIR, "sqlatest.db")
CONNECTION_STRING = "sqlite:///%s" % SQLITEPATH
TASK_LIST = ["item%d\tproperty%d\n" % (i, i) for i in range(10)]
class BaseTask(luigi.Task):
def output(self):
return MockFile("BaseTask", mirror_on_stderr=True)
def run(self):
out = self.output().open("w")
for task in TASK_LIST:
out.write(task)
out.close()
class SQLATask(sqla.CopyToTable):
columns = [
(["item", sqlalchemy.String(64)], {}),
(["property", sqlalchemy.String(64)], {})
]
connection_string = CONNECTION_STRING
table = "item_property"
chunk_size = 1
def requires(self):
return BaseTask()
@attr('sqlalchemy')
class TestSQLA(unittest.TestCase):
NUM_WORKERS = 1
def _clear_tables(self):
meta = sqlalchemy.MetaData()
meta.reflect(bind=self.engine)
for table in reversed(meta.sorted_tables):
self.engine.execute(table.delete())
def setUp(self):
if not os.path.exists(TEMPDIR):
os.mkdir(TEMPDIR)
self.engine = sqlalchemy.create_engine(CONNECTION_STRING)
def tearDown(self):
self._clear_tables()
if os.path.exists(TEMPDIR):
shutil.rmtree(TEMPDIR)
def test_create_table(self):
"""
Test that this method creates table that we require
:return:
"""
class TestSQLData(sqla.CopyToTable):
connection_string = CONNECTION_STRING
table = "test_table"
columns = [
(["id", sqlalchemy.Integer], dict(primary_key=True)),
(["name", sqlalchemy.String(64)], {}),
(["value", sqlalchemy.String(64)], {})
]
chunk_size = 1
def output(self):
pass
sql_copy = TestSQLData()
eng = sqlalchemy.create_engine(TestSQLData.connection_string)
self.assertFalse(eng.dialect.has_table(eng.connect(), TestSQLData.table))
sql_copy.create_table(eng)
self.assertTrue(eng.dialect.has_table(eng.connect(), TestSQLData.table))
# repeat and ensure it just binds to existing table
sql_copy.create_table(eng)
def test_create_table_raises_no_columns(self):
"""
Check that the test fails when the columns are not set
:return:
"""
class TestSQLData(sqla.CopyToTable):
connection_string = CONNECTION_STRING
table = "test_table"
columns = []
chunk_size = 1
def output(self):
pass
sql_copy = TestSQLData()
eng = sqlalchemy.create_engine(TestSQLData.connection_string)
self.assertRaises(NotImplementedError, sql_copy.create_table, eng)
def _check_entries(self, engine):
with engine.begin() as conn:
meta = sqlalchemy.MetaData()
meta.reflect(bind=engine)
self.assertEqual(set([u'table_updates', u'item_property']), set(meta.tables.keys()))
table = meta.tables[SQLATask.table]
s = sqlalchemy.select([sqlalchemy.func.count(table.c.item)])
result = conn.execute(s).fetchone()
self.assertEqual(len(TASK_LIST), result[0])
s = sqlalchemy.select([table]).order_by(table.c.item)
result = conn.execute(s).fetchall()
for i in range(len(TASK_LIST)):
given = TASK_LIST[i].strip("\n").split("\t")
given = (unicode(given[0]), unicode(given[1]))
self.assertEqual(given, tuple(result[i]))
def test_rows(self):
task, task0 = SQLATask(), BaseTask()
luigi.build([task, task0], local_scheduler=True, workers=self.NUM_WORKERS)
for i, row in enumerate(task.rows()):
given = TASK_LIST[i].strip("\n").split("\t")
self.assertEqual(row, given)
def test_run(self):
"""
Checking that the runs go as expected. Rerunning the same shouldn't end up
inserting more rows into the db.
:return:
"""
task, task0 = SQLATask(), BaseTask()
self.engine = sqlalchemy.create_engine(task.connection_string)
luigi.build([task0, task], local_scheduler=True)
self._check_entries(self.engine)
# rerun and the num entries should be the same
luigi.build([task0, task], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
def test_run_with_chunk_size(self):
"""
The chunk_size can be specified in order to control the batch size for inserts.
:return:
"""
task, task0 = SQLATask(), BaseTask()
self.engine = sqlalchemy.create_engine(task.connection_string)
task.chunk_size = 2 # change chunk size and check it runs ok
luigi.build([task, task0], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
def test_reflect(self):
"""
If the table is setup already, then one can set reflect to True, and
completely skip the columns part. It is not even required at that point.
:return:
"""
class AnotherSQLATask(sqla.CopyToTable):
connection_string = CONNECTION_STRING
table = "item_property"
reflect = True
chunk_size = 1
def requires(self):
return SQLATask()
def copy(self, conn, ins_rows, table_bound):
ins = table_bound.update().\
where(table_bound.c.property == sqlalchemy.bindparam("_property")).\
values({table_bound.c.item: sqlalchemy.bindparam("_item")})
conn.execute(ins, ins_rows)
def rows(self):
for line in TASK_LIST:
yield line.strip("\n").split("\t")
task0, task1, task2 = AnotherSQLATask(), SQLATask(), BaseTask()
luigi.build([task0, task1, task2], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
def test_create_marker_table(self):
"""
Is the marker table created as expected for the SQLAlchemyTarget
:return:
"""
target = sqla.SQLAlchemyTarget(CONNECTION_STRING, "test_table", "12312123")
target.create_marker_table()
self.assertTrue(target.engine.dialect.has_table(target.engine.connect(), target.marker_table))
def test_touch(self):
"""
Touch takes care of creating a checkpoint for task completion
:return:
"""
target = sqla.SQLAlchemyTarget(CONNECTION_STRING, "test_table", "12312123")
target.create_marker_table()
self.assertFalse(target.exists())
target.touch()
self.assertTrue(target.exists())
def test_row_overload(self):
"""Overload the rows method and we should be able to insert data into database"""
class SQLARowOverloadTest(sqla.CopyToTable):
columns = [
(["item", sqlalchemy.String(64)], {}),
(["property", sqlalchemy.String(64)], {})
]
connection_string = CONNECTION_STRING
table = "item_property"
chunk_size = 1
def rows(self):
tasks = [("item0", "property0"), ("item1", "property1"), ("item2", "property2"), ("item3", "property3"),
("item4", "property4"), ("item5", "property5"), ("item6", "property6"), ("item7", "property7"),
("item8", "property8"), ("item9", "property9")]
for row in tasks:
yield row
task = SQLARowOverloadTest()
luigi.build([task], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
def test_column_row_separator(self):
"""
Test alternate column row separator works
:return:
"""
class ModBaseTask(luigi.Task):
def output(self):
return MockFile("ModBaseTask", mirror_on_stderr=True)
def run(self):
out = self.output().open("w")
tasks = ["item%d,property%d\n" % (i, i) for i in range(10)]
for task in tasks:
out.write(task)
out.close()
class ModSQLATask(sqla.CopyToTable):
columns = [
(["item", sqlalchemy.String(64)], {}),
(["property", sqlalchemy.String(64)], {})
]
connection_string = CONNECTION_STRING
table = "item_property"
column_separator = ","
chunk_size = 1
def requires(self):
return ModBaseTask()
task1, task2 = ModBaseTask(), ModSQLATask()
luigi.build([task1, task2], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
def test_update_rows_test(self):
"""
Overload the copy() method and implement an update action.
:return:
"""
class ModBaseTask(luigi.Task):
def output(self):
return MockFile("BaseTask", mirror_on_stderr=True)
def run(self):
out = self.output().open("w")
for task in TASK_LIST:
out.write("dummy_" + task)
out.close()
class ModSQLATask(sqla.CopyToTable):
connection_string = CONNECTION_STRING
table = "item_property"
columns = [
(["item", sqlalchemy.String(64)], {}),
(["property", sqlalchemy.String(64)], {})
]
chunk_size = 1
def requires(self):
return ModBaseTask()
class UpdateSQLATask(sqla.CopyToTable):
connection_string = CONNECTION_STRING
table = "item_property"
reflect = True
chunk_size = 1
def requires(self):
return ModSQLATask()
def copy(self, conn, ins_rows, table_bound):
ins = table_bound.update().\
where(table_bound.c.property == sqlalchemy.bindparam("_property")).\
values({table_bound.c.item: sqlalchemy.bindparam("_item")})
conn.execute(ins, ins_rows)
def rows(self):
for task in TASK_LIST:
yield task.strip("\n").split("\t")
# Running only task1, and task2 should fail
task1, task2, task3 = ModBaseTask(), ModSQLATask(), UpdateSQLATask()
luigi.build([task1, task2, task3], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
def test_multiple_tasks(self):
"""
Test a case where there are multiple tasks
:return:
"""
class SmallSQLATask(sqla.CopyToTable):
item = luigi.Parameter()
property = luigi.Parameter()
columns = [
(["item", sqlalchemy.String(64)], {}),
(["property", sqlalchemy.String(64)], {})
]
connection_string = CONNECTION_STRING
table = "item_property"
chunk_size = 1
def rows(self):
yield (self.item, self.property)
class ManyBaseTask(luigi.Task):
def requires(self):
for t in TASK_LIST:
item, property = t.strip().split("\t")
yield SmallSQLATask(item=item, property=property)
task2 = ManyBaseTask()
luigi.build([task2], local_scheduler=True, workers=self.NUM_WORKERS)
self._check_entries(self.engine)
@attr('sqlalchemy')
class TestSQLA2(TestSQLA):
""" 2 workers version
"""
NUM_WORKERS = 2
| apache-2.0 |
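A minimal standalone task mirroring the pattern the tests above exercise: a `sqla.CopyToTable` subclass whose `rows()` yields tuples matching `columns`. The SQLite connection string and table name are placeholders.

```python
import luigi
import sqlalchemy
from luigi.contrib import sqla


class WriteItems(sqla.CopyToTable):
    # Two string columns, as in the SQLATask used throughout the tests.
    columns = [
        (["item", sqlalchemy.String(64)], {}),
        (["property", sqlalchemy.String(64)], {}),
    ]
    connection_string = "sqlite:///items.db"   # placeholder database
    table = "item_property"

    def rows(self):
        for i in range(3):
            yield ("item%d" % i, "property%d" % i)


if __name__ == "__main__":
    luigi.build([WriteItems()], local_scheduler=True)
```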
Supermaster34/3.0-Kernel-Galaxy-Player-US | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
		buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
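# Illustrative note (not part of the original script): the '\(\*' checks above
# match the function-pointer declarations in target_core_fabric_ops.h, e.g. a
# line of the form
#	int (*queue_data_in)(struct se_cmd *);
# Each matching line is appended (rstripped) to the global fabric_ops list,
# which then drives the stub generation in tcm_mod_dump_fabric_ops() below.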
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
			buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
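# For example (illustrative instantiation of the template above), with
# fabric_mod_name == "tcm_nab5000" the generated Makefile contains roughly:
#
#   tcm_nab5000-objs := tcm_nab5000_fabric.o \
#			tcm_nab5000_configfs.o
#   obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000.o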
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
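# For example (illustrative instantiation of the template above), with
# fabric_mod_name == "tcm_nab5000" the generated Kconfig entry is roughly:
#
#   config TCM_NAB5000
#	tristate "TCM_NAB5000 fabric module"
#	depends on TARGET_CORE && CONFIGFS_FS
#	default n
#	---help---
#	Say Y here to enable the TCM_NAB5000 fabric module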
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
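# Example invocation (module name is illustrative; the script assumes it is
# run from a directory two levels below the kernel tree root, see the
# tcm_dir = os.getcwd() + "/../../" assignment in main()):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates the skeleton under drivers/target/tcm_nab5000/ (base header,
# _fabric.c/.h, _configfs.c, Kconfig, Makefile) and optionally hooks the new
# module into drivers/target/Kconfig and drivers/target/Makefile.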
| gpl-2.0 |
precedenceguo/mxnet | example/image-classification/symbols/inception-bn.py | 57 | 7598 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception + BN, suitable for images of around 224 x 224 pixels
Reference:
Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep
network training by reducing internal covariate shift. arXiv preprint
arXiv:1502.03167, 2015.
"""
import mxnet as mx
eps = 1e-10 + 1e-5
bn_mom = 0.9
fix_gamma = False
def ConvFactory(data, num_filter, kernel, stride=(1,1), pad=(0, 0), name=None, suffix='', attr={}):
conv = mx.symbol.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, name='conv_%s%s' %(name, suffix))
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=fix_gamma, eps=eps, momentum=bn_mom, name='bn_%s%s' %(name, suffix))
act = mx.symbol.Activation(data=bn, act_type='relu', name='relu_%s%s' %(name, suffix), attr=attr)
return act
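# Illustrative naming note (not in the original file): a call such as
#   ConvFactory(data, num_filter=64, kernel=(3, 3), pad=(1, 1), name='1')
# produces the symbol chain conv_1 -> bn_1 -> relu_1, so later layers can be
# referenced by these predictable names.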
def InceptionFactoryA(data, num_1x1, num_3x3red, num_3x3, num_d3x3red, num_d3x3, pool, proj, name):
# 1x1
c1x1 = ConvFactory(data=data, num_filter=num_1x1, kernel=(1, 1), name=('%s_1x1' % name))
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool, name=('%s_pool_%s_pool' % (pool, name)))
cproj = ConvFactory(data=pooling, num_filter=proj, kernel=(1, 1), name=('%s_proj' % name))
# concat
concat = mx.symbol.Concat(*[c1x1, c3x3, cd3x3, cproj], name='ch_concat_%s_chconcat' % name)
return concat
def InceptionFactoryB(data, num_3x3red, num_3x3, num_d3x3red, num_d3x3, name):
# 3x3 reduce + 3x3
c3x3r = ConvFactory(data=data, num_filter=num_3x3red, kernel=(1, 1), name=('%s_3x3' % name), suffix='_reduce')
c3x3 = ConvFactory(data=c3x3r, num_filter=num_3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_3x3' % name))
# double 3x3 reduce + double 3x3
cd3x3r = ConvFactory(data=data, num_filter=num_d3x3red, kernel=(1, 1), name=('%s_double_3x3' % name), suffix='_reduce')
cd3x3 = ConvFactory(data=cd3x3r, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name=('%s_double_3x3_0' % name))
cd3x3 = ConvFactory(data=cd3x3, num_filter=num_d3x3, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=('%s_double_3x3_1' % name))
# pool + proj
pooling = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type="max", name=('max_pool_%s_pool' % name))
# concat
concat = mx.symbol.Concat(*[c3x3, cd3x3, pooling], name='ch_concat_%s_chconcat' % name)
return concat
# A Simple Downsampling Factory
def DownsampleFactory(data, ch_3x3, name, attr):
# conv 3x3
conv = ConvFactory(data=data, name=name+'_conv',kernel=(3, 3), stride=(2, 2), num_filter=ch_3x3, pad=(1, 1), attr=attr)
# pool
pool = mx.symbol.Pooling(data=data, name=name+'_pool',kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', attr=attr)
# concat
concat = mx.symbol.Concat(*[conv, pool], name=name+'_ch_concat')
return concat
# A Simple module
def SimpleFactory(data, ch_1x1, ch_3x3, name, attr):
# 1x1
conv1x1 = ConvFactory(data=data, name=name+'_1x1', kernel=(1, 1), pad=(0, 0), num_filter=ch_1x1, attr=attr)
# 3x3
conv3x3 = ConvFactory(data=data, name=name+'_3x3', kernel=(3, 3), pad=(1, 1), num_filter=ch_3x3, attr=attr)
#concat
concat = mx.symbol.Concat(*[conv1x1, conv3x3], name=name+'_ch_concat')
return concat
def get_symbol(num_classes, image_shape, **kwargs):
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
# attr = {'force_mirroring': 'true'}
attr = {}
# data
data = mx.symbol.Variable(name="data")
if height <= 28:
# a simper version
conv1 = ConvFactory(data=data, kernel=(3,3), pad=(1,1), name="1", num_filter=96, attr=attr)
in3a = SimpleFactory(conv1, 32, 32, 'in3a', attr)
in3b = SimpleFactory(in3a, 32, 48, 'in3b', attr)
in3c = DownsampleFactory(in3b, 80, 'in3c', attr)
in4a = SimpleFactory(in3c, 112, 48, 'in4a', attr)
in4b = SimpleFactory(in4a, 96, 64, 'in4b', attr)
in4c = SimpleFactory(in4b, 80, 80, 'in4c', attr)
in4d = SimpleFactory(in4c, 48, 96, 'in4d', attr)
in4e = DownsampleFactory(in4d, 96, 'in4e', attr)
in5a = SimpleFactory(in4e, 176, 160, 'in5a', attr)
in5b = SimpleFactory(in5a, 176, 160, 'in5b', attr)
pool = mx.symbol.Pooling(data=in5b, pool_type="avg", kernel=(7,7), name="global_pool", attr=attr)
else:
# stage 1
conv1 = ConvFactory(data=data, num_filter=64, kernel=(7, 7), stride=(2, 2), pad=(3, 3), name='1')
pool1 = mx.symbol.Pooling(data=conv1, kernel=(3, 3), stride=(2, 2), name='pool_1', pool_type='max')
# stage 2
conv2red = ConvFactory(data=pool1, num_filter=64, kernel=(1, 1), stride=(1, 1), name='2_red')
conv2 = ConvFactory(data=conv2red, num_filter=192, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='2')
pool2 = mx.symbol.Pooling(data=conv2, kernel=(3, 3), stride=(2, 2), name='pool_2', pool_type='max')
        # stage 3
in3a = InceptionFactoryA(pool2, 64, 64, 64, 64, 96, "avg", 32, '3a')
in3b = InceptionFactoryA(in3a, 64, 64, 96, 64, 96, "avg", 64, '3b')
in3c = InceptionFactoryB(in3b, 128, 160, 64, 96, '3c')
        # stage 4
in4a = InceptionFactoryA(in3c, 224, 64, 96, 96, 128, "avg", 128, '4a')
in4b = InceptionFactoryA(in4a, 192, 96, 128, 96, 128, "avg", 128, '4b')
in4c = InceptionFactoryA(in4b, 160, 128, 160, 128, 160, "avg", 128, '4c')
in4d = InceptionFactoryA(in4c, 96, 128, 192, 160, 192, "avg", 128, '4d')
in4e = InceptionFactoryB(in4d, 128, 192, 192, 256, '4e')
        # stage 5
in5a = InceptionFactoryA(in4e, 352, 192, 320, 160, 224, "avg", 128, '5a')
in5b = InceptionFactoryA(in5a, 352, 192, 320, 192, 224, "max", 128, '5b')
# global avg pooling
pool = mx.symbol.Pooling(data=in5b, kernel=(7, 7), stride=(1, 1), name="global_pool", pool_type='avg')
# linear classifier
flatten = mx.symbol.Flatten(data=pool)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes)
softmax = mx.symbol.SoftmaxOutput(data=fc1, name='softmax')
return softmax
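# Minimal usage sketch (illustrative, not part of the original file; assumes
# the MXNet Module API with the default 'data'/'softmax_label' names):
#
#   sym = get_symbol(num_classes=1000, image_shape='3,224,224')
#   mod = mx.mod.Module(symbol=sym, context=mx.cpu(),
#                       data_names=['data'], label_names=['softmax_label'])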
| apache-2.0 |
openfun/edx-platform | common/lib/xmodule/xmodule/randomize_module.py | 54 | 3690 | import logging
import random
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.seq_module import SequenceDescriptor
from lxml import etree
from xblock.fields import Scope, Integer
from xblock.fragment import Fragment
log = logging.getLogger('edx.' + __name__)
class RandomizeFields(object):
choice = Integer(help="Which random child was chosen", scope=Scope.user_state)
class RandomizeModule(RandomizeFields, XModule):
"""
Chooses a random child module. Chooses the same one every time for each student.
Example:
<randomize>
<problem url_name="problem1" />
<problem url_name="problem2" />
<problem url_name="problem3" />
</randomize>
User notes:
- If you're randomizing amongst graded modules, each of them MUST be worth the same
number of points. Otherwise, the earth will be overrun by monsters from the
deeps. You have been warned.
Technical notes:
- There is more dark magic in this code than I'd like. The whole varying-children +
grading interaction is a tangle between super and subclasses of descriptors and
modules.
"""
def __init__(self, *args, **kwargs):
super(RandomizeModule, self).__init__(*args, **kwargs)
# NOTE: calling self.get_children() creates a circular reference--
# it calls get_child_descriptors() internally, but that doesn't work until
# we've picked a choice
num_choices = len(self.descriptor.get_children())
        if self.choice is not None and self.choice >= num_choices:
# Oops. Children changed. Reset.
self.choice = None
if self.choice is None:
# choose one based on the system seed, or randomly if that's not available
if num_choices > 0:
if self.system.seed is not None:
self.choice = self.system.seed % num_choices
else:
self.choice = random.randrange(0, num_choices)
if self.choice is not None:
self.child_descriptor = self.descriptor.get_children()[self.choice]
# Now get_children() should return a list with one element
log.debug("children of randomize module (should be only 1): %s",
self.get_children())
self.child = self.get_children()[0]
else:
self.child_descriptor = None
self.child = None
def get_child_descriptors(self):
"""
For grading--return just the chosen child.
"""
if self.child_descriptor is None:
return []
return [self.child_descriptor]
def student_view(self, context):
if self.child is None:
# raise error instead? In fact, could complain on descriptor load...
return Fragment(content=u"<div>Nothing to randomize between</div>")
return self.child.render(STUDENT_VIEW, context)
def get_icon_class(self):
return self.child.get_icon_class() if self.child else 'other'
class RandomizeDescriptor(RandomizeFields, SequenceDescriptor):
# the editing interface can be the same as for sequences -- just a container
module_class = RandomizeModule
filename_extension = "xml"
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('randomize')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
def has_dynamic_children(self):
"""
Grading needs to know that only one of the children is actually "real". This
makes it use module.get_child_descriptors().
"""
return True
| agpl-3.0 |
chokribr/PIST | modules/webcomment/lib/webcomment_washer.py | 27 | 4062 | from invenio.htmlutils import HTMLWasher
import htmlentitydefs
import cgi
import re
RE_HTML_FIRST_NON_QUOTATION_CHAR_ON_LINE = re.compile('[^>]')
class EmailWasher(HTMLWasher):
"""
    Wash comments before they are sent by email
"""
line_quotation = ''
def handle_starttag(self, tag, attrs):
"""Function called for new opening tags"""
if tag.lower() in self.allowed_tag_whitelist:
if tag.lower() == 'ol':
# we need a list to store the last
# number used in the previous ordered lists
self.previous_nbs.append(self.nb)
self.nb = 0
# we need to know which is the tag list
self.previous_type_lists.append(tag.lower())
# we must remove any non-relevant spacing and end of
# line before
self.result = self.result.rstrip()
elif tag.lower() == 'ul':
self.previous_type_lists.append(tag.lower())
# we must remove any non-relevant spacing and end of
# line before
self.result = self.result.rstrip()
elif tag.lower() == 'li':
# we must remove any non-relevant spacing and end of
# line before
self.result = self.result.rstrip()
if self.previous_type_lists[-1] == 'ol':
self.nb += 1
self.result += '\n' + self.line_quotation + \
' ' * len(self.previous_type_lists) + str(self.nb) + '. '
else:
self.result += '\n' + self.line_quotation + \
' ' * len(self.previous_type_lists) + '* '
elif tag.lower() == 'a':
#self.previous_type_lists.append(tag.lower())
for (attr, value) in attrs:
if attr.lower() == 'href':
self.url = value
self.result += '<' + value + '>'
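    # Illustrative example (assumes 'ol'/'li' are in the inherited
    # allowed_tag_whitelist): washing '<ol><li>a</li><li>b</li></ol>'
    # renders roughly as '\n 1. a\n 2. b\n', i.e. ordered lists become
    # numbered plain-text lines.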
def handle_data(self, data):
"""Function called for text nodes"""
if not self.silent:
if self.url:
if self.url == data:
data = ''
else:
data = '(' + data + ')'
self.url = ''
self.result += cgi.escape(data, True)
lines = data.splitlines()
if len(lines) > 1:
match_obj = RE_HTML_FIRST_NON_QUOTATION_CHAR_ON_LINE.search(lines[-1])
if match_obj:
self.line_quotation = '>' * match_obj.start()
else:
self.line_quotation = ''
def handle_endtag(self, tag):
"""Function called for ending of tags"""
if tag.lower() in self.allowed_tag_whitelist:
if tag.lower() in ['ul', 'ol']:
self.previous_type_lists = self.previous_type_lists[:-1]
if tag.lower() == 'ol':
self.nb = self.previous_nbs[-1]
self.previous_nbs = self.previous_nbs[:-1]
# we must remove any non-relevant spacing and end of
# line before
self.result = self.result.rstrip()
self.result += '\n' + self.line_quotation
def handle_startendtag(self, tag, attrs):
"""Function called for empty tags (e.g. <br />)"""
self.result += ""
def handle_charref(self, name):
"""Process character references of the form "&#ref;". Transform to text whenever possible."""
try:
self.result += unichr(int(name)).encode("utf-8")
except:
return
def handle_entityref(self, name):
"""Process a general entity reference of the form "&name;".
Transform to text whenever possible."""
char_code = htmlentitydefs.name2codepoint.get(name, None)
if char_code is not None:
try:
self.result += unichr(char_code).encode("utf-8")
except:
return
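# Minimal usage sketch (illustrative; assumes the wash() entry point inherited
# from HTMLWasher, and the exact plain-text output depends on its whitelist):
#
#   washer = EmailWasher()
#   washer.wash('Try <a href="http://example.org">this link</a>')
#   # roughly: 'Try <http://example.org>(this link)'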
| gpl-2.0 |
Changaco/oh-mainline | vendor/packages/Pygments/pygments/lexers/foxpro.py | 335 | 26220 | # -*- coding: utf-8 -*-
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
FoxPro syntax allows to shorten all keywords and function names
to 4 characters. Shortened forms are not recognized by this lexer.
*New in Pygments 1.6.*
"""
name = 'FoxPro'
aliases = ['Clipper', 'XBase']
filenames = ['*.PRG', '*.prg']
    mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
# Square brackets may be used for array indices
# and for string literal. Look for arrays
# before matching string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
(r'\'[^\'\n]*\'|"[^"\n]*"|\[[^]*]\]', String),
            (r'(^\s*\*|&&).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
r'COM|Functions|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}
| agpl-3.0 |
Evfro/polara | polara/recommender/external/turi/turiwrapper.py | 1 | 8039 | import numpy as np
import turicreate as tc
from polara import RecommenderModel
class TuriFactorizationRecommender(RecommenderModel):
def __init__(self, *args, **kwargs):
self.item_side_info = kwargs.pop('item_side_info', None)
self.user_side_info = kwargs.pop('user_side_info', None)
super().__init__(*args, **kwargs)
self.tc_model = None
self._rank = 10
self.method = 'TCF'
# side data
self._item_data = None
self._user_data = None
self.side_data_factorization = True
# randomization
self.seed = 61
# optimization
self.binary_target = False
self.solver = 'auto'
self.max_iterations = 25
# regularization
self.regularization = 1e-10
self.linear_regularization = 1e-10
# adagrad
self.adagrad_momentum_weighting = 0.9
# sgd
self.sgd_step_size = 0
# ranking
self.ranking_optimization = False
self.ranking_regularization = 0.25
self.unobserved_rating_value = None
self.num_sampled_negative_examples = 4
# other parameters
self.with_data_feedback = True
self.other_tc_params = {}
self.data.subscribe(self.data.on_change_event, self._clean_metadata)
def _clean_metadata(self):
self._item_data = None
self._user_data = None
@property
def rank(self):
return self._rank
@rank.setter
def rank(self, new_value):
if new_value != self._rank:
self._rank = new_value
self._is_ready = False
self._recommendations = None
num_factors = rank # convenience
@property
def item_data(self):
if self.item_side_info is not None:
if self._item_data is None:
itemid = self.data.fields.itemid
item_side_info = self.item_side_info
index_data = getattr(self.data.index, 'itemid')
try:
item_index = index_data.training
except AttributeError:
item_index = index_data
index_map = item_index.set_index('old').new
side_features = (item_side_info.loc[item_index['old']]
.reset_index())
side_features[itemid] = side_features[itemid].map(index_map)
self._item_data = tc.SFrame(side_features)
else:
self._item_data = None
return self._item_data
@property
def user_data(self):
if self.user_side_info is not None:
if self._user_data is None:
userid = self.data.fields.userid
user_side_info = self.user_side_info
index_data = getattr(self.data.index, 'userid')
try:
user_index = index_data.training
except AttributeError:
user_index = index_data
index_map = user_index.set_index('old').new
side_features = (user_side_info.loc[user_index['old']]
.reset_index())
side_features[userid] = side_features[userid].map(index_map)
self._user_data = tc.SFrame(side_features)
else:
self._user_data = None
return self._user_data
def build(self):
item_data = self.item_data
user_data = self.user_data
params = dict(item_data=item_data,
user_data=user_data,
side_data_factorization=self.side_data_factorization,
num_factors=self.rank,
binary_target=self.binary_target,
verbose=self.verbose,
# initialization
random_seed=self.seed,
init_random_sigma=0.1,
# optimization
solver=self.solver,
max_iterations=self.max_iterations,
# adagrad
adagrad_momentum_weighting=self.adagrad_momentum_weighting,
# sgd
sgd_step_size=self.sgd_step_size,
# regularization
regularization=self.regularization,
linear_regularization=self.linear_regularization,
# other parameters
**self.other_tc_params)
if self.ranking_optimization:
build_model = tc.recommender.ranking_factorization_recommender.create
params.update(ranking_regularization=self.ranking_regularization,
num_sampled_negative_examples=self.num_sampled_negative_examples)
if self.unobserved_rating_value is not None:
params.update(unobserved_rating_value=self.unobserved_rating_value)
else:
build_model = tc.factorization_recommender.create
target = self.data.fields.feedback if self.with_data_feedback else None
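        # Hand the assembled parameters to the selected Turi Create factory;
        # target is None when the explicit feedback values should be ignored.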
self.tc_model = build_model(tc.SFrame(self.data.training),
user_id=self.data.fields.userid,
item_id=self.data.fields.itemid,
target=target,
**params)
if self.training_time is not None:
self.training_time.append(self.tc_model.training_time)
if self.verbose:
print(f'{self.method} training time: {self.tc_model.training_time}s')
def get_recommendations(self):
if self.data.warm_start:
raise NotImplementedError
userid = self.data.fields.userid
test_users = self.data.test.holdout[userid].drop_duplicates().values
top_recs = self.tc_model.recommend(users=test_users,
k=self.topk,
exclude_known=self.filter_seen,
verbose=self.verbose)
itemid = self.data.fields.itemid
top_recs = top_recs[itemid].to_numpy().reshape(-1, self.topk)
return top_recs
def evaluate_rmse(self):
if self.data.warm_start:
raise NotImplementedError
feedback = self.data.fields.feedback
holdout = tc.SFrame(self.data.test.holdout)
return self.tc_model.evaluate_rmse(holdout, feedback)['rmse_overall']
class WarmStartRecommendationsMixin:
def get_recommendations(self):
pass
class ColdStartRecommendationsMixin:
def get_recommendations(self):
userid = self.data.fields.userid
itemid = self.data.fields.itemid
data_index = self.data.index
cold_items_index = data_index.itemid.cold_start.old.values
lower_index = data_index.itemid.training.new.max() + 1
upper_index = lower_index + len(cold_items_index)
# prevent intersecting cold items index with known items
unseen_items_idx = np.arange(lower_index, upper_index)
new_item_data = tc.SFrame(self.item_side_info.loc[cold_items_index]
.reset_index()
.assign(**{itemid: unseen_items_idx}))
repr_users = self.data.representative_users
try:
repr_users = repr_users.new.values
except AttributeError:
repr_users = data_index.userid.training.new.values
observation_idx = [a.flat for a in np.broadcast_arrays(repr_users, unseen_items_idx[:, None])]
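        # Score every (representative user, cold item) pair below and keep, for
        # each cold item, the top-k users that respond most strongly to it.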
new_observation = tc.SFrame(dict(zip([userid, itemid], observation_idx)))
scores = self.tc_model.predict(new_observation, new_item_data=new_item_data).to_numpy()
top_similar_idx = self.get_topk_elements(scores.reshape(-1, len(repr_users)))
top_similar_users = repr_users[top_similar_idx.ravel()].reshape(top_similar_idx.shape)
return top_similar_users
| mit |
philoniare/horizon | openstack_dashboard/dashboards/identity/groups/urls.py | 64 | 1248 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.identity.groups import views
urlpatterns = patterns(
'',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create$', views.CreateView.as_view(), name='create'),
url(r'^(?P<group_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<group_id>[^/]+)/manage_members/$',
views.ManageMembersView.as_view(), name='manage_members'),
url(r'^(?P<group_id>[^/]+)/add_members/$',
views.NonMembersView.as_view(), name='add_members'),
)
| apache-2.0 |
2014c2g2/2015cdag2_0421 | static/Brython3.1.1-20150328-091302/Lib/ui/widget.py | 706 | 1774 | import random
from browser import doc, win
def getMousePosition(e):
if e is None:
e=win.event
if e.pageX or e.pageY:
return {'x': e.pageX, 'y': e.pageY}
if e.clientX or e.clientY:
        _posx = e.clientX + doc.body.scrollLeft + doc.documentElement.scrollLeft
        _posy = e.clientY + doc.body.scrollTop + doc.documentElement.scrollTop
return {'x': _posx, 'y': _posy}
return {'x': 0, 'y': 0}
class Widget:
def __init__(self, element, type, id=None):
self._element=element
if id is None:
self._element.id='%s_%s' % (type, int(100000*random.random()))
else:
self._element.id=id
def get_id(self):
return self._element.id
def attach(self, element_id):
""" append this DOM component to DOM element element_id"""
#document[element_id] <= self._element #this doesn't work :(
#doc is actually the global 'doc' not the one we imported from browser :(
doc[element_id] <= self._element
def show(self):
self._element.display='block'
def hide(self):
self._element.display='none'
class DraggableWidget(Widget):
def __init__(self, element, type, id=None):
Widget.__init__(self, element, type, id)
def drag(e):
self._element.style.top='%spx' % (e.clientY - self._deltaY)
self._element.style.left='%spx' % (e.clientX - self._deltaX)
def mouseDown(e):
self._element.style.position='absolute'
self._deltaX=e.clientX - self._element.offsetLeft
self._deltaY=e.clientY - self._element.offsetTop
doc.bind('mousemove', drag)
def mouseUp(e):
doc.unbind('mousemove')
self._element.bind('mousedown', mouseDown)
self._element.bind('mouseup', mouseUp)
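# Illustrative usage (the element passed in is up to the caller; names below
# are examples only):
#     from browser import html
#     box = DraggableWidget(html.DIV('drag me'), 'div')
#     box.attach('container_id')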
| agpl-3.0 |
JIoJIaJIu/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/handlers.py | 89 | 13047 | import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
ext = os.path.splitext(path)[1].lstrip(".")
if ext in content_types:
return content_types[ext]
return "application/octet-stream"
def filesystem_path(base_path, request, url_base="/"):
if base_path is None:
base_path = request.doc_root
path = urllib.unquote(request.url_parts.path)
if path.startswith(url_base):
path = path[len(url_base):]
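    # Reject any ".." component outright so a request cannot escape the
    # document root via path traversal.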
if ".." in path:
raise HTTPException(404)
new_path = os.path.join(base_path, path)
# Otherwise setting path to / allows access outside the root directory
if not new_path.startswith(base_path):
raise HTTPException(404)
return new_path
class DirectoryHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
def __repr__(self):
return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
def __call__(self, request, response):
if not request.url_parts.path.endswith("/"):
raise HTTPException(404)
path = filesystem_path(self.base_path, request, self.url_base)
if not os.path.isdir(path):
raise HTTPException(404, "%s is not a directory" % path)
response.headers = [("Content-Type", "text/html")]
response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
"items": "\n".join(self.list_items(request, path))}
def list_items(self, request, path):
# TODO: this won't actually list all routes, only the
# ones that correspond to a real filesystem path. It's
# not possible to list every route that will match
# something, but it should be possible to at least list the
# statically defined ones
base_path = request.url_parts.path
if not base_path.endswith("/"):
base_path += "/"
if base_path != "/":
link = urlparse.urljoin(base_path, "..")
yield ("""<li class="dir"><a href="%(link)s">%(name)s</a></li>""" %
{"link": link, "name": ".."})
for item in sorted(os.listdir(path)):
link = cgi.escape(urllib.quote(item))
if os.path.isdir(os.path.join(path, item)):
link += "/"
class_ = "dir"
else:
class_ = "file"
yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a></li>""" %
{"link": link, "name": cgi.escape(item), "class": class_})
directory_handler = DirectoryHandler()
class FileHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.directory_handler = DirectoryHandler(self.base_path, self.url_base)
def __repr__(self):
return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
def __call__(self, request, response):
path = filesystem_path(self.base_path, request, self.url_base)
if os.path.isdir(path):
return self.directory_handler(request, response)
try:
#This is probably racy with some other process trying to change the file
file_size = os.stat(path).st_size
response.headers.update(self.get_headers(request, path))
if "Range" in request.headers:
try:
byte_ranges = RangeParser()(request.headers['Range'], file_size)
except HTTPException as e:
if e.code == 416:
response.headers.set("Content-Range", "bytes */%i" % file_size)
raise
else:
byte_ranges = None
data = self.get_data(response, path, byte_ranges)
response.content = data
query = urlparse.parse_qs(request.url_parts.query)
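            # A "pipe" query parameter, or a ".sub" extension on the file name,
            # routes the body through the substitution/transformation pipeline.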
pipeline = None
if "pipe" in query:
pipeline = Pipeline(query["pipe"][-1])
elif os.path.splitext(path)[0].endswith(".sub"):
ml_extensions = {".html", ".htm", ".xht", ".xhtml", ".xml", ".svg"}
escape_type = "html" if os.path.splitext(path)[1] in ml_extensions else "none"
pipeline = Pipeline("sub(%s)" % escape_type)
if pipeline is not None:
response = pipeline(request, response)
return response
except (OSError, IOError):
raise HTTPException(404)
def get_headers(self, request, path):
rv = self.default_headers(path)
rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
rv.extend(self.load_headers(request, path))
return rv
def load_headers(self, request, path):
headers_path = path + ".sub.headers"
if os.path.exists(headers_path):
use_sub = True
else:
headers_path = path + ".headers"
use_sub = False
try:
with open(headers_path) as headers_file:
data = headers_file.read()
except IOError:
return []
else:
if use_sub:
data = template(request, data, escape_type="none")
return [tuple(item.strip() for item in line.split(":", 1))
for line in data.splitlines() if line]
def get_data(self, response, path, byte_ranges):
with open(path, 'rb') as f:
if byte_ranges is None:
return f.read()
else:
response.status = 206
if len(byte_ranges) > 1:
parts_content_type, content = self.set_response_multipart(response,
byte_ranges,
f)
for byte_range in byte_ranges:
content.append_part(self.get_range_data(f, byte_range),
parts_content_type,
[("Content-Range", byte_range.header_value())])
return content
else:
response.headers.set("Content-Range", byte_ranges[0].header_value())
return self.get_range_data(f, byte_ranges[0])
def set_response_multipart(self, response, ranges, f):
parts_content_type = response.headers.get("Content-Type")
if parts_content_type:
parts_content_type = parts_content_type[-1]
else:
parts_content_type = None
content = MultipartContent()
response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
return parts_content_type, content
def get_range_data(self, f, byte_range):
f.seek(byte_range.lower)
return f.read(byte_range.upper - byte_range.lower)
def default_headers(self, path):
return [("Content-Type", guess_content_type(path))]
file_handler = FileHandler()
class PythonScriptHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
def __repr__(self):
return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)
def __call__(self, request, response):
path = filesystem_path(self.base_path, request, self.url_base)
try:
environ = {"__file__": path}
execfile(path, environ, environ)
if "main" in environ:
handler = FunctionHandler(environ["main"])
handler(request, response)
else:
raise HTTPException(500, "No main function in script %s" % path)
except IOError:
raise HTTPException(404)
python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
def __init__(self, func):
self.func = func
def __call__(self, request, response):
try:
rv = self.func(request, response)
except Exception:
msg = traceback.format_exc()
raise HTTPException(500, message=msg)
if rv is not None:
if isinstance(rv, tuple):
if len(rv) == 3:
status, headers, content = rv
response.status = status
elif len(rv) == 2:
headers, content = rv
else:
raise HTTPException(500)
response.headers.update(headers)
else:
content = rv
response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
return FunctionHandler(func)
class JsonHandler(object):
def __init__(self, func):
self.func = func
def __call__(self, request, response):
return FunctionHandler(self.handle_request)(request, response)
def handle_request(self, request, response):
rv = self.func(request, response)
response.headers.set("Content-Type", "application/json")
enc = json.dumps
if isinstance(rv, tuple):
rv = list(rv)
value = tuple(rv[:-1] + [enc(rv[-1])])
length = len(value[-1])
else:
value = enc(rv)
length = len(value)
response.headers.set("Content-Length", length)
return value
def json_handler(func):
return JsonHandler(func)
class AsIsHandler(object):
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
def __call__(self, request, response):
path = filesystem_path(self.base_path, request, self.url_base)
try:
with open(path) as f:
response.writer.write_content(f.read())
response.close_connection = True
except IOError:
raise HTTPException(404)
as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
def __init__(self, handler, user, password):
"""
A Basic Auth handler
:Args:
- handler: a secondary handler for the request after authentication is successful (example file_handler)
- user: string of the valid user name or None if any / all credentials are allowed
- password: string of the password required
"""
self.user = user
self.password = password
self.handler = handler
def __call__(self, request, response):
if "authorization" not in request.headers:
response.status = 401
response.headers.set("WWW-Authenticate", "Basic")
return response
else:
auth = Authentication(request.headers)
if self.user is not None and (self.user != auth.username or self.password != auth.password):
response.set_error(403, "Invalid username or password")
return response
return self.handler(request, response)
basic_auth_handler = BasicAuthHandler(file_handler, None, None)
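# The module-level instance above accepts any credentials (user=None); a route
# that should enforce a fixed login would construct its own instance, e.g.
# (illustrative values only):
#     BasicAuthHandler(file_handler, "testuser", "testpass")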
class ErrorHandler(object):
def __init__(self, status):
self.status = status
def __call__(self, request, response):
response.set_error(self.status)
class StaticHandler(object):
    def __init__(self, path, format_args, content_type, **headers):
        """Handler that reads a file from a path and substitutes some fixed data
        :param path: Path to the template file to use
        :param format_args: Dictionary of values to substitute into the template file
        :param content_type: Content type header to serve the response with
        :param headers: List of headers to send with responses"""
with open(path) as f:
self.data = f.read() % format_args
self.resp_headers = [("Content-Type", content_type)]
        for k, v in headers.iteritems():
            self.resp_headers.append((k.replace("_", "-"), v))
self.handler = handler(self.handle_request)
def handle_request(self, request, response):
return self.resp_headers, self.data
def __call__(self, request, response):
rv = self.handler(request, response)
return rv
| mpl-2.0 |
OpenUpgrade/OpenUpgrade | addons/project_timesheet/__init__.py | 441 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_timesheet
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SujaySKumar/django | tests/db_functions/tests.py | 193 | 18491 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from unittest import skipIf, skipUnless
from django.db import connection
from django.db.models import CharField, TextField, Value as V
from django.db.models.expressions import RawSQL
from django.db.models.functions import (
Coalesce, Concat, Greatest, Least, Length, Lower, Now, Substr, Upper,
)
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import six, timezone
from .models import Article, Author, Fan
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
def truncate_microseconds(value):
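    # Backends without sub-second precision store datetimes with the
    # microseconds dropped, so expected values must be truncated to match.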
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
class FunctionTests(TestCase):
def test_coalesce(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(display_name=Coalesce('alias', 'name'))
self.assertQuerysetEqual(
authors.order_by('name'), [
'smithj',
'Rhonda',
],
lambda a: a.display_name
)
with self.assertRaisesMessage(ValueError, 'Coalesce must take at least two expressions'):
Author.objects.annotate(display_name=Coalesce('alias'))
def test_coalesce_mixed_values(self):
a1 = Author.objects.create(name='John Smith', alias='smithj')
a2 = Author.objects.create(name='Rhonda')
ar1 = Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
ar1.authors.add(a1)
ar1.authors.add(a2)
# mixed Text and Char
article = Article.objects.annotate(
headline=Coalesce('summary', 'text', output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by('title'), [
lorem_ipsum,
],
lambda a: a.headline
)
# mixed Text and Char wrapped
article = Article.objects.annotate(
headline=Coalesce(Lower('summary'), Lower('text'), output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by('title'), [
lorem_ipsum.lower(),
],
lambda a: a.headline
)
def test_coalesce_ordering(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.order_by(Coalesce('alias', 'name'))
self.assertQuerysetEqual(
authors, [
'Rhonda',
'John Smith',
],
lambda a: a.name
)
authors = Author.objects.order_by(Coalesce('alias', 'name').asc())
self.assertQuerysetEqual(
authors, [
'Rhonda',
'John Smith',
],
lambda a: a.name
)
authors = Author.objects.order_by(Coalesce('alias', 'name').desc())
self.assertQuerysetEqual(
authors, [
'John Smith',
'Rhonda',
],
lambda a: a.name
)
def test_greatest(self):
now = timezone.now()
before = now - timedelta(hours=1)
Article.objects.create(
title="Testing with Django",
written=before,
published=now,
)
articles = Article.objects.annotate(
last_updated=Greatest('written', 'published'),
)
self.assertEqual(articles.first().last_updated, truncate_microseconds(now))
@skipUnlessDBFeature('greatest_least_ignores_nulls')
def test_greatest_ignores_null(self):
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
articles = Article.objects.annotate(
last_updated=Greatest('written', 'published'),
)
self.assertEqual(articles.first().last_updated, now)
@skipIfDBFeature('greatest_least_ignores_nulls')
    def test_greatest_propagates_null(self):
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
articles = Article.objects.annotate(
last_updated=Greatest('written', 'published'),
)
self.assertIsNone(articles.first().last_updated)
@skipIf(connection.vendor == 'mysql', "This doesn't work on MySQL")
def test_greatest_coalesce_workaround(self):
past = datetime(1900, 1, 1)
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
articles = Article.objects.annotate(
last_updated=Greatest(
Coalesce('written', past),
Coalesce('published', past),
),
)
self.assertEqual(articles.first().last_updated, now)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific workaround")
def test_greatest_coalesce_workaround_mysql(self):
past = datetime(1900, 1, 1)
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
past_sql = RawSQL("cast(%s as datetime)", (past,))
articles = Article.objects.annotate(
last_updated=Greatest(
Coalesce('written', past_sql),
Coalesce('published', past_sql),
),
)
self.assertEqual(articles.first().last_updated, truncate_microseconds(now))
def test_greatest_all_null(self):
Article.objects.create(title="Testing with Django", written=timezone.now())
articles = Article.objects.annotate(last_updated=Greatest('published', 'updated'))
self.assertIsNone(articles.first().last_updated)
def test_greatest_one_expressions(self):
with self.assertRaisesMessage(ValueError, 'Greatest must take at least two expressions'):
Greatest('written')
def test_greatest_related_field(self):
author = Author.objects.create(name='John Smith', age=45)
Fan.objects.create(name='Margaret', age=50, author=author)
authors = Author.objects.annotate(
highest_age=Greatest('age', 'fans__age'),
)
self.assertEqual(authors.first().highest_age, 50)
def test_greatest_update(self):
author = Author.objects.create(name='James Smith', goes_by='Jim')
Author.objects.update(alias=Greatest('name', 'goes_by'))
author.refresh_from_db()
self.assertEqual(author.alias, 'Jim')
def test_least(self):
now = timezone.now()
before = now - timedelta(hours=1)
Article.objects.create(
title="Testing with Django",
written=before,
published=now,
)
articles = Article.objects.annotate(
first_updated=Least('written', 'published'),
)
self.assertEqual(articles.first().first_updated, truncate_microseconds(before))
@skipUnlessDBFeature('greatest_least_ignores_nulls')
def test_least_ignores_null(self):
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
articles = Article.objects.annotate(
first_updated=Least('written', 'published'),
)
self.assertEqual(articles.first().first_updated, now)
@skipIfDBFeature('greatest_least_ignores_nulls')
    def test_least_propagates_null(self):
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
articles = Article.objects.annotate(
first_updated=Least('written', 'published'),
)
self.assertIsNone(articles.first().first_updated)
@skipIf(connection.vendor == 'mysql', "This doesn't work on MySQL")
def test_least_coalesce_workaround(self):
future = datetime(2100, 1, 1)
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
articles = Article.objects.annotate(
last_updated=Least(
Coalesce('written', future),
Coalesce('published', future),
),
)
self.assertEqual(articles.first().last_updated, now)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific workaround")
def test_least_coalesce_workaround_mysql(self):
future = datetime(2100, 1, 1)
now = timezone.now()
Article.objects.create(title="Testing with Django", written=now)
future_sql = RawSQL("cast(%s as datetime)", (future,))
articles = Article.objects.annotate(
last_updated=Least(
Coalesce('written', future_sql),
Coalesce('published', future_sql),
),
)
self.assertEqual(articles.first().last_updated, truncate_microseconds(now))
def test_least_all_null(self):
Article.objects.create(title="Testing with Django", written=timezone.now())
articles = Article.objects.annotate(first_updated=Least('published', 'updated'))
self.assertIsNone(articles.first().first_updated)
def test_least_one_expressions(self):
with self.assertRaisesMessage(ValueError, 'Least must take at least two expressions'):
Least('written')
def test_least_related_field(self):
author = Author.objects.create(name='John Smith', age=45)
Fan.objects.create(name='Margaret', age=50, author=author)
authors = Author.objects.annotate(
lowest_age=Least('age', 'fans__age'),
)
self.assertEqual(authors.first().lowest_age, 45)
def test_least_update(self):
author = Author.objects.create(name='James Smith', goes_by='Jim')
Author.objects.update(alias=Least('name', 'goes_by'))
author.refresh_from_db()
self.assertEqual(author.alias, 'James Smith')
def test_concat(self):
Author.objects.create(name='Jayden')
Author.objects.create(name='John Smith', alias='smithj', goes_by='John')
Author.objects.create(name='Margaret', goes_by='Maggie')
Author.objects.create(name='Rhonda', alias='adnohR')
authors = Author.objects.annotate(joined=Concat('alias', 'goes_by'))
self.assertQuerysetEqual(
authors.order_by('name'), [
'',
'smithjJohn',
'Maggie',
'adnohR',
],
lambda a: a.joined
)
with self.assertRaisesMessage(ValueError, 'Concat must take at least two expressions'):
Author.objects.annotate(joined=Concat('alias'))
def test_concat_many(self):
Author.objects.create(name='Jayden')
Author.objects.create(name='John Smith', alias='smithj', goes_by='John')
Author.objects.create(name='Margaret', goes_by='Maggie')
Author.objects.create(name='Rhonda', alias='adnohR')
authors = Author.objects.annotate(
joined=Concat('name', V(' ('), 'goes_by', V(')'), output_field=CharField()),
)
self.assertQuerysetEqual(
authors.order_by('name'), [
'Jayden ()',
'John Smith (John)',
'Margaret (Maggie)',
'Rhonda ()',
],
lambda a: a.joined
)
def test_concat_mixed_char_text(self):
Article.objects.create(title='The Title', text=lorem_ipsum, written=timezone.now())
article = Article.objects.annotate(
title_text=Concat('title', V(' - '), 'text', output_field=TextField()),
).get(title='The Title')
self.assertEqual(article.title + ' - ' + article.text, article.title_text)
# wrap the concat in something else to ensure that we're still
# getting text rather than bytes
article = Article.objects.annotate(
title_text=Upper(Concat('title', V(' - '), 'text', output_field=TextField())),
).get(title='The Title')
expected = article.title + ' - ' + article.text
self.assertEqual(expected.upper(), article.title_text)
def test_lower(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(lower_name=Lower('name'))
self.assertQuerysetEqual(
authors.order_by('name'), [
'john smith',
'rhonda',
],
lambda a: a.lower_name
)
Author.objects.update(name=Lower('name'))
self.assertQuerysetEqual(
authors.order_by('name'), [
('john smith', 'john smith'),
('rhonda', 'rhonda'),
],
lambda a: (a.lower_name, a.name)
)
def test_upper(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(upper_name=Upper('name'))
self.assertQuerysetEqual(
authors.order_by('name'), [
'JOHN SMITH',
'RHONDA',
],
lambda a: a.upper_name
)
Author.objects.update(name=Upper('name'))
self.assertQuerysetEqual(
authors.order_by('name'), [
('JOHN SMITH', 'JOHN SMITH'),
('RHONDA', 'RHONDA'),
],
lambda a: (a.upper_name, a.name)
)
def test_length(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(
name_length=Length('name'),
alias_length=Length('alias'))
self.assertQuerysetEqual(
authors.order_by('name'), [
(10, 6),
(6, None),
],
lambda a: (a.name_length, a.alias_length)
)
self.assertEqual(authors.filter(alias_length__lte=Length('name')).count(), 1)
def test_length_ordering(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='John Smith', alias='smithj1')
Author.objects.create(name='Rhonda', alias='ronny')
authors = Author.objects.order_by(Length('name'), Length('alias'))
self.assertQuerysetEqual(
authors, [
('Rhonda', 'ronny'),
('John Smith', 'smithj'),
('John Smith', 'smithj1'),
],
lambda a: (a.name, a.alias)
)
def test_substr(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(name_part=Substr('name', 5, 3))
self.assertQuerysetEqual(
authors.order_by('name'), [
' Sm',
'da',
],
lambda a: a.name_part
)
authors = Author.objects.annotate(name_part=Substr('name', 2))
self.assertQuerysetEqual(
authors.order_by('name'), [
'ohn Smith',
'honda',
],
lambda a: a.name_part
)
# if alias is null, set to first 5 lower characters of the name
Author.objects.filter(alias__isnull=True).update(
alias=Lower(Substr('name', 1, 5)),
)
self.assertQuerysetEqual(
authors.order_by('name'), [
'smithj',
'rhond',
],
lambda a: a.alias
)
def test_substr_start(self):
Author.objects.create(name='John Smith', alias='smithj')
a = Author.objects.annotate(
name_part_1=Substr('name', 1),
name_part_2=Substr('name', 2),
).get(alias='smithj')
self.assertEqual(a.name_part_1[1:], a.name_part_2)
with six.assertRaisesRegex(self, ValueError, "'pos' must be greater than 0"):
Author.objects.annotate(raises=Substr('name', 0))
def test_substr_with_expressions(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(name_part=Substr('name', 5, 3))
self.assertQuerysetEqual(
authors.order_by('name'), [
' Sm',
'da',
],
lambda a: a.name_part
)
def test_nested_function_ordering(self):
Author.objects.create(name='John Smith')
Author.objects.create(name='Rhonda Simpson', alias='ronny')
authors = Author.objects.order_by(Length(Coalesce('alias', 'name')))
self.assertQuerysetEqual(
authors, [
'Rhonda Simpson',
'John Smith',
],
lambda a: a.name
)
authors = Author.objects.order_by(Length(Coalesce('alias', 'name')).desc())
self.assertQuerysetEqual(
authors, [
'John Smith',
'Rhonda Simpson',
],
lambda a: a.name
)
def test_now(self):
ar1 = Article.objects.create(
title='How to Django',
text=lorem_ipsum,
written=timezone.now(),
)
ar2 = Article.objects.create(
title='How to Time Travel',
text=lorem_ipsum,
written=timezone.now(),
)
num_updated = Article.objects.filter(id=ar1.id, published=None).update(published=Now())
self.assertEqual(num_updated, 1)
num_updated = Article.objects.filter(id=ar1.id, published=None).update(published=Now())
self.assertEqual(num_updated, 0)
ar1.refresh_from_db()
self.assertIsInstance(ar1.published, datetime)
ar2.published = Now() + timedelta(days=2)
ar2.save()
ar2.refresh_from_db()
self.assertIsInstance(ar2.published, datetime)
self.assertQuerysetEqual(
Article.objects.filter(published__lte=Now()),
['How to Django'],
lambda a: a.title
)
self.assertQuerysetEqual(
Article.objects.filter(published__gt=Now()),
['How to Time Travel'],
lambda a: a.title
)
| bsd-3-clause |
smscannell/crazyflie-clients-python | lib/cfclient/ui/widgets/ai.py | 5 | 8202 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Attitude indicator widget.
"""
import sys
from PyQt4 import QtGui, QtCore
__author__ = 'Bitcraze AB'
__all__ = ['AttitudeIndicator']
class AttitudeIndicator(QtGui.QWidget):
"""Widget for showing attitude"""
def __init__(self):
super(AttitudeIndicator, self).__init__()
self.roll = 0
self.pitch = 0
self.hover = False
self.hoverASL = 0.0
self.hoverTargetASL = 0.0
self.setMinimumSize(30, 30)
# self.setMaximumSize(240,240)
def setRoll(self, roll):
self.roll = roll
self.repaint()
def setPitch(self, pitch):
self.pitch = pitch
self.repaint()
def setHover(self, target):
self.hoverTargetASL = target
self.hover = target > 0
self.repaint()
def setBaro(self, asl):
self.hoverASL = asl
self.repaint()
def setRollPitch(self, roll, pitch):
self.roll = roll
self.pitch = pitch
self.repaint()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
size = self.size()
w = size.width()
h = size.height()
qp.translate(w / 2, h / 2)
qp.rotate(self.roll)
qp.translate(0, (self.pitch * h) / 50)
qp.translate(-w / 2, -h / 2)
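        # The transform stack draws the artificial horizon: rotate the painter
        # by the roll angle about the widget centre, then shift the scene
        # vertically in proportion to the pitch.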
qp.setRenderHint(qp.Antialiasing)
font = QtGui.QFont('Serif', 7, QtGui.QFont.Light)
qp.setFont(font)
# Draw the blue
qp.setPen(QtGui.QColor(0, 61, 144))
qp.setBrush(QtGui.QColor(0, 61, 144))
qp.drawRect(-w, h / 2, 3 * w, -3 * h)
# Draw the marron
qp.setPen(QtGui.QColor(59, 41, 39))
qp.setBrush(QtGui.QColor(59, 41, 39))
qp.drawRect(-w, h / 2, 3 * w, 3 * h)
pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 1.5,
QtCore.Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(-w, h / 2, 3 * w, h / 2)
# Drawing pitch lines
        for offset in [-180, 0, 180]:
            for i in range(-900, 900, 25):
                pos = (((i / 10.0) + 25 + offset) * h / 50.0)
                if i % 100 == 0:
                    length = 0.35 * w
                    if i != 0:
                        if offset == 0:
qp.drawText((w / 2) + (length / 2) + (w * 0.06),
pos, "{}".format(-i / 10))
qp.drawText((w / 2) - (length / 2) - (w * 0.08),
pos, "{}".format(-i / 10))
else:
qp.drawText((w / 2) + (length / 2) + (w * 0.06),
pos, "{}".format(i / 10))
qp.drawText((w / 2) - (length / 2) - (w * 0.08),
pos, "{}".format(i / 10))
elif i % 50 == 0:
length = 0.2 * w
else:
length = 0.1 * w
qp.drawLine((w / 2) - (length / 2), pos,
(w / 2) + (length / 2), pos)
qp.setWorldMatrixEnabled(False)
pen = QtGui.QPen(QtGui.QColor(0, 0, 0), 2,
QtCore.Qt.SolidLine)
qp.setBrush(QtGui.QColor(0, 0, 0))
qp.setPen(pen)
qp.drawLine(0, h / 2, w, h / 2)
# Draw Hover vs Target
qp.setWorldMatrixEnabled(False)
pen = QtGui.QPen(QtGui.QColor(255, 255, 255), 2,
QtCore.Qt.SolidLine)
qp.setBrush(QtGui.QColor(255, 255, 255))
qp.setPen(pen)
fh = max(7, h / 50)
font = QtGui.QFont('Sans', fh, QtGui.QFont.Light)
qp.setFont(font)
qp.resetTransform()
qp.translate(0, h / 2)
if not self.hover:
# asl
qp.drawText(w - fh * 10, fh / 2, str(round(self.hoverASL, 2)))
if self.hover:
# target asl (center)
qp.drawText(
w - fh * 10, fh / 2, str(round(self.hoverTargetASL, 2)))
diff = round(self.hoverASL - self.hoverTargetASL, 2)
# cap to +- 2.8m
if diff < -2.8:
pos_y = -h / 6 * -2.8
elif diff > 2.8:
pos_y = -h / 6 * 2.8
else:
pos_y = -h / 6 * diff
# difference from target (moves up and down +- 2.8m)
qp.drawText(w - fh * 3.8, pos_y + fh / 2, str(diff))
# vertical line
qp.drawLine(w - fh * 4.5, 0, w - fh * 4.5, pos_y)
# left horizontal line
qp.drawLine(w - fh * 4.7, 0, w - fh * 4.5, 0)
# right horizontal line
qp.drawLine(w - fh * 4.2, pos_y, w - fh * 4.5, pos_y)
if __name__ == "__main__":
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def updatePitch(self, pitch):
self.wid.setPitch(pitch - 90)
def updateRoll(self, roll):
self.wid.setRoll((roll / 10.0) - 180.0)
def updateTarget(self, target):
self.wid.setHover(500 + target / 10.)
def updateBaro(self, asl):
self.wid.setBaro(500 + asl / 10.)
def initUI(self):
vbox = QtGui.QVBoxLayout()
sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
sld.setFocusPolicy(QtCore.Qt.NoFocus)
sld.setRange(0, 3600)
sld.setValue(1800)
vbox.addWidget(sld)
self.wid = AttitudeIndicator()
sld.valueChanged[int].connect(self.updateRoll)
vbox.addWidget(self.wid)
hbox = QtGui.QHBoxLayout()
hbox.addLayout(vbox)
sldPitch = QtGui.QSlider(QtCore.Qt.Vertical, self)
sldPitch.setFocusPolicy(QtCore.Qt.NoFocus)
sldPitch.setRange(0, 180)
sldPitch.setValue(90)
sldPitch.valueChanged[int].connect(self.updatePitch)
hbox.addWidget(sldPitch)
sldASL = QtGui.QSlider(QtCore.Qt.Vertical, self)
sldASL.setFocusPolicy(QtCore.Qt.NoFocus)
sldASL.setRange(-200, 200)
sldASL.setValue(0)
sldASL.valueChanged[int].connect(self.updateBaro)
sldT = QtGui.QSlider(QtCore.Qt.Vertical, self)
sldT.setFocusPolicy(QtCore.Qt.NoFocus)
sldT.setRange(-200, 200)
sldT.setValue(0)
sldT.valueChanged[int].connect(self.updateTarget)
hbox.addWidget(sldT)
hbox.addWidget(sldASL)
self.setLayout(hbox)
self.setGeometry(50, 50, 510, 510)
self.setWindowTitle('Attitude Indicator')
self.show()
def changeValue(self, value):
self.c.updateBW.emit(value)
self.wid.repaint()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| gpl-2.0 |
sqlanywhere/sqlany-django | sqlany_django/introspection.py | 1 | 5278 | from django import VERSION as djangoVersion
if djangoVersion[:2] >= (1, 8):
from django.db.backends.base.introspection import BaseDatabaseIntrospection, TableInfo
else:
from django.db.backends import BaseDatabaseIntrospection
from sqlanydb import ProgrammingError, OperationalError
import re
import sqlanydb
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = { sqlanydb.DT_DATE : 'DateField',
sqlanydb.DT_TIME : 'DateTimeField',
sqlanydb.DT_TIMESTAMP : 'DateTimeField',
sqlanydb.DT_VARCHAR : 'CharField',
sqlanydb.DT_FIXCHAR : 'CharField',
sqlanydb.DT_LONGVARCHAR : 'CharField',
sqlanydb.DT_STRING : 'CharField',
sqlanydb.DT_DOUBLE : 'FloatField',
sqlanydb.DT_FLOAT : 'FloatField',
sqlanydb.DT_DECIMAL : 'IntegerField',
sqlanydb.DT_INT : 'IntegerField',
sqlanydb.DT_SMALLINT : 'IntegerField',
sqlanydb.DT_BINARY : 'BlobField',
sqlanydb.DT_LONGBINARY : 'BlobField',
sqlanydb.DT_TINYINT : 'IntegerField',
sqlanydb.DT_BIGINT : 'BigIntegerField',
sqlanydb.DT_UNSINT : 'IntegerField',
sqlanydb.DT_UNSSMALLINT : 'IntegerField',
sqlanydb.DT_UNSBIGINT : 'BigIntegerField',
sqlanydb.DT_BIT : 'IntegerField',
sqlanydb.DT_LONGNVARCHAR : 'CharField'
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute( "SELECT table_name,table_type FROM sys.SYSTAB WHERE "
"creator = USER_ID() and table_type in (1,2,3,4,21)" )
if djangoVersion[:2] < (1, 8):
return [row[0] for row in cursor.fetchall()]
return [TableInfo( row[0], 'v' if row[1] in ['2','21'] else 't' )
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT FIRST * FROM %s" %
self.connection.ops.quote_name(table_name))
return tuple((c[0], t, None, c[3], c[4], c[5], int(c[6]) == 1) for c, t in cursor.columns())
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
my_field_dict = self._name_to_index(cursor, table_name)
constraints = []
relations = {}
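        # Join SYSFKEY with SYSIDXCOL twice to pair each foreign-key column
        # with the primary-key column it references in the other table.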
cursor.execute("""
SELECT (fidx.column_id - 1), t2.table_name, (pidx.column_id - 1) FROM SYSTAB t1
INNER JOIN SYSFKEY f ON f.foreign_table_id = t1.table_id
INNER JOIN SYSTAB t2 ON t2.table_id = f.primary_table_id
INNER JOIN SYSIDXCOL fidx ON fidx.table_id = f.foreign_table_id AND fidx.index_id = f.foreign_index_id
INNER JOIN SYSIDXCOL pidx ON pidx.table_id = f.primary_table_id AND pidx.index_id = f.primary_index_id
WHERE t1.table_name = %s""", [table_name])
constraints.extend(cursor.fetchall())
for my_field_index, other_table, other_field_index in constraints:
relations[my_field_index] = (other_field_index, other_table)
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# We need to skip multi-column indexes.
cursor.execute("""
select max(c.column_name),
max(ix.index_category),
max(ix."unique")
from SYSIDX ix, SYSTABLE t, SYSIDXCOL ixc, SYSCOLUMN c
where ix.table_id = t.table_id
and ixc.table_id = t.table_id
and ixc.index_id = ix.index_id
and ixc.table_id = c.table_id
and ixc.column_id = c.column_id
and t.table_name = %s
group by ix.index_id
having count(*) = 1
order by ix.index_id
""", [table_name])
indexes = {}
for col_name, cat, unique in cursor.fetchall():
indexes[col_name] = {
'primary_key': (cat == 1),
'unique': (unique == 1 or unique == 2) }
return indexes
| bsd-3-clause |
jtyuan/racetrack | src/cpu/BaseCPU.py | 2 | 13784 | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# Copyright (c) 2011 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Rick Strong
# Andreas Hansson
import sys
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from Bus import CoherentBus
from InstTracer import InstTracer
from ExeTracer import ExeTracer
from MemObject import MemObject
from ClockDomain import *
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'alpha':
from AlphaTLB import AlphaDTB, AlphaITB
from AlphaInterrupts import AlphaInterrupts
from AlphaISA import AlphaISA
isa_class = AlphaISA
elif buildEnv['TARGET_ISA'] == 'sparc':
from SparcTLB import SparcTLB
from SparcInterrupts import SparcInterrupts
from SparcISA import SparcISA
isa_class = SparcISA
elif buildEnv['TARGET_ISA'] == 'x86':
from X86TLB import X86TLB
from X86LocalApic import X86LocalApic
from X86ISA import X86ISA
isa_class = X86ISA
elif buildEnv['TARGET_ISA'] == 'mips':
from MipsTLB import MipsTLB
from MipsInterrupts import MipsInterrupts
from MipsISA import MipsISA
isa_class = MipsISA
elif buildEnv['TARGET_ISA'] == 'arm':
from ArmTLB import ArmTLB, ArmStage2IMMU, ArmStage2DMMU
from ArmInterrupts import ArmInterrupts
from ArmISA import ArmISA
isa_class = ArmISA
elif buildEnv['TARGET_ISA'] == 'power':
from PowerTLB import PowerTLB
from PowerInterrupts import PowerInterrupts
from PowerISA import PowerISA
isa_class = PowerISA
class BaseCPU(MemObject):
type = 'BaseCPU'
abstract = True
cxx_header = "cpu/base.hh"
@classmethod
def export_methods(cls, code):
code('''
void switchOut();
void takeOverFrom(BaseCPU *cpu);
bool switchedOut();
void flushTLBs();
Counter totalInsts();
void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
''')
@classmethod
def memory_mode(cls):
"""Which memory mode does this CPU require?"""
return 'invalid'
@classmethod
def require_caches(cls):
"""Does the CPU model require caches?
Some CPU models might make assumptions that require them to
have caches.
"""
return False
@classmethod
def support_take_over(cls):
"""Does the CPU model support CPU takeOverFrom?"""
return False
def takeOverFrom(self, old_cpu):
self._ccObject.takeOverFrom(old_cpu._ccObject)
system = Param.System(Parent.any, "system object")
cpu_id = Param.Int(-1, "CPU identifier")
socket_id = Param.Unsigned(0, "Physical Socket identifier")
numThreads = Param.Unsigned(1, "number of HW thread contexts")
function_trace = Param.Bool(False, "Enable function trace")
function_trace_start = Param.Tick(0, "Tick to start function trace")
checker = Param.BaseCPU(NULL, "checker CPU")
do_checkpoint_insts = Param.Bool(True,
"enable checkpoint pseudo instructions")
do_statistics_insts = Param.Bool(True,
"enable statistics pseudo instructions")
profile = Param.Latency('0ns', "trace the kernel stack")
do_quiesce = Param.Bool(True, "enable quiesce instructions")
workload = VectorParam.Process([], "processes to run")
if buildEnv['TARGET_ISA'] == 'sparc':
dtb = Param.SparcTLB(SparcTLB(), "Data TLB")
itb = Param.SparcTLB(SparcTLB(), "Instruction TLB")
interrupts = Param.SparcInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.SparcISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'alpha':
dtb = Param.AlphaTLB(AlphaDTB(), "Data TLB")
itb = Param.AlphaTLB(AlphaITB(), "Instruction TLB")
interrupts = Param.AlphaInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.AlphaISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'x86':
dtb = Param.X86TLB(X86TLB(), "Data TLB")
itb = Param.X86TLB(X86TLB(), "Instruction TLB")
interrupts = Param.X86LocalApic(NULL, "Interrupt Controller")
isa = VectorParam.X86ISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'mips':
dtb = Param.MipsTLB(MipsTLB(), "Data TLB")
itb = Param.MipsTLB(MipsTLB(), "Instruction TLB")
interrupts = Param.MipsInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.MipsISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'arm':
dtb = Param.ArmTLB(ArmTLB(), "Data TLB")
itb = Param.ArmTLB(ArmTLB(), "Instruction TLB")
istage2_mmu = Param.ArmStage2MMU(ArmStage2IMMU(), "Stage 2 trans")
dstage2_mmu = Param.ArmStage2MMU(ArmStage2DMMU(), "Stage 2 trans")
interrupts = Param.ArmInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.ArmISA([ isa_class() ], "ISA instance")
elif buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
dtb = Param.PowerTLB(PowerTLB(), "Data TLB")
itb = Param.PowerTLB(PowerTLB(), "Instruction TLB")
interrupts = Param.PowerInterrupts(
NULL, "Interrupt Controller")
isa = VectorParam.PowerISA([ isa_class() ], "ISA instance")
else:
print "Don't know what TLB to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
max_insts_all_threads = Param.Counter(0,
"terminate when all threads have reached this inst count")
max_insts_any_thread = Param.Counter(0,
"terminate when any thread reaches this inst count")
simpoint_start_insts = VectorParam.Counter([],
"starting instruction counts of simpoints")
max_loads_all_threads = Param.Counter(0,
"terminate when all threads have reached this load count")
max_loads_any_thread = Param.Counter(0,
"terminate when any thread reaches this load count")
progress_interval = Param.Frequency('0Hz',
"frequency to print out the progress message")
switched_out = Param.Bool(False,
"Leave the CPU switched out after startup (used when switching " \
"between CPU models)")
tracer = Param.InstTracer(default_tracer, "Instruction tracer")
icache_port = MasterPort("Instruction Port")
dcache_port = MasterPort("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
if buildEnv['TARGET_ISA'] in ['arm']:
_cached_ports += ["istage2_mmu.stage2_tlb.walker.port",
"dstage2_mmu.stage2_tlb.walker.port"]
_uncached_slave_ports = []
_uncached_master_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
_uncached_slave_ports += ["interrupts.pio", "interrupts.int_slave"]
_uncached_master_ports += ["interrupts.int_master"]
def createInterruptController(self):
if buildEnv['TARGET_ISA'] == 'sparc':
self.interrupts = SparcInterrupts()
elif buildEnv['TARGET_ISA'] == 'alpha':
self.interrupts = AlphaInterrupts()
elif buildEnv['TARGET_ISA'] == 'x86':
self.apic_clk_domain = DerivedClockDomain(clk_domain =
Parent.clk_domain,
clk_divider = 16)
self.interrupts = X86LocalApic(clk_domain = self.apic_clk_domain,
pio_addr=0x2000000000000000)
_localApic = self.interrupts
elif buildEnv['TARGET_ISA'] == 'mips':
self.interrupts = MipsInterrupts()
elif buildEnv['TARGET_ISA'] == 'arm':
self.interrupts = ArmInterrupts()
elif buildEnv['TARGET_ISA'] == 'power':
self.interrupts = PowerInterrupts()
else:
print "Don't know what Interrupt Controller to use for ISA %s" % \
buildEnv['TARGET_ISA']
sys.exit(1)
def connectCachedPorts(self, bus):
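        # The entries in _cached_ports can be dotted attribute paths (e.g.
        # 'itb.walker.port'), so a plain setattr on self would not reach the
        # nested object; exec assigns through the full path instead.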
for p in self._cached_ports:
exec('self.%s = bus.slave' % p)
def connectUncachedPorts(self, bus):
for p in self._uncached_slave_ports:
exec('self.%s = bus.master' % p)
for p in self._uncached_master_ports:
exec('self.%s = bus.slave' % p)
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
if not uncached_bus:
uncached_bus = cached_bus
self.connectUncachedPorts(uncached_bus)
def addPrivateSplitL1Caches(self, ic, dc, iwc = None, dwc = None):
self.icache = ic
self.dcache = dc
self.icache_port = ic.cpu_side
self.dcache_port = dc.cpu_side
self._cached_ports = ['icache.mem_side', 'dcache.mem_side']
if buildEnv['TARGET_ISA'] in ['x86', 'arm']:
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
if buildEnv['TARGET_ISA'] in ['arm']:
self.itb_walker_cache_bus = CoherentBus()
self.dtb_walker_cache_bus = CoherentBus()
self.itb_walker_cache_bus.master = iwc.cpu_side
self.dtb_walker_cache_bus.master = dwc.cpu_side
self.itb.walker.port = self.itb_walker_cache_bus.slave
self.dtb.walker.port = self.dtb_walker_cache_bus.slave
self.istage2_mmu.stage2_tlb.walker.port = self.itb_walker_cache_bus.slave
self.dstage2_mmu.stage2_tlb.walker.port = self.dtb_walker_cache_bus.slave
else:
self.itb.walker.port = iwc.cpu_side
self.dtb.walker.port = dwc.cpu_side
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
if buildEnv['TARGET_ISA'] in ['arm']:
self._cached_ports += ["istage2_mmu.stage2_tlb.walker.port", \
"dstage2_mmu.stage2_tlb.walker.port"]
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
self._cached_ports += ["checker.itb.walker.port", \
"checker.dtb.walker.port"]
if buildEnv['TARGET_ISA'] in ['arm']:
self._cached_ports += ["checker.istage2_mmu.stage2_tlb.walker.port", \
"checker.dstage2_mmu.stage2_tlb.walker.port"]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc = None, dwc = None):
self.addPrivateSplitL1Caches(ic, dc, iwc, dwc)
# Set a width of 32 bytes (256-bits), which is four times that
# of the default bus. The clock of the CPU is inherited by
# default.
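        # (32 bytes * 8 = 256 bits; "four times the default" implies the
        # default coherent bus is 8 bytes, i.e. 64 bits, wide.)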
self.toL2Bus = CoherentBus(width = 32)
self.connectCachedPorts(self.toL2Bus)
self.l2cache = l2c
self.toL2Bus.master = self.l2cache.cpu_side
self._cached_ports = ['l2cache.mem_side']
def createThreads(self):
self.isa = [ isa_class() for i in xrange(self.numThreads) ]
if self.checker != NULL:
self.checker.createThreads()
def addCheckerCpu(self):
pass
| bsd-3-clause |
wufulin/PyMusicPlayer | src/util/SongManager.py | 1 | 2318 | #encoding=utf-8
'''
@author: wufulin
'''
import os
import sys
sys.path.append("..")
from util.Tool import *
####################################
# SongManager
# Song list manager class
####################################
class SongManager(object):
'''
    Playlist manager.
'''
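    # Minimal usage sketch (illustrative; the path below is hypothetical):
    #   manager = SongManager()
    #   manager.loadSongList()
    #   manager.addSong(u'/music/track01.mp3')
    #   manager.printSongList()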
def __init__(self):
self.__SongList = []
self.__SongListFile = 'conf/playlist.txt'
def loadSongList(self):
"""从playlist.txt获取歌曲列表"""
if os.path.exists(self.__SongListFile) is True:
            try:
                with open(self.__SongListFile, 'r') as f:
                    self.__SongList = []
                    for song in f:
                        song = song.replace('\n', '')
                        self.__SongList.append(song)
            except IOError:
                pass
def saveSongList(self):
"""将歌曲列表的内容保存在playlist.txt文件"""
        try:
            with open(self.__SongListFile, "w") as f:
                for song in self.__SongList:
                    f.write(song.__str__().encode(CODEC))
                    f.write("\n")
        except IOError:
            pass
def addSong(self, filePath):
"""添加一个歌曲文件全路径名到播放列表"""
        song_format = filePath.split('.')[-1]
        if song_format in ('mp3', 'MP3', 'wav', 'WAV', 'wm', 'WM'):
if 0 == self.__SongList.count(filePath.__str__()):
self.__SongList.append(filePath.__str__().encode(CODEC))
self.saveSongList()
return True
else:
return False
def deleteSongByIndex(self, index):
self.__SongList.remove(self.__SongList[index])
self.saveSongList()
def getSongByIndex(self, index):
return self.__SongList[index]
def getSongList(self):
return self.__SongList
def getSongCount(self):
return len(self.__SongList)
def printSongList(self):
for song in self.__SongList:
print song
| mit |
carsongee/edx-platform | common/djangoapps/terrain/stubs/tests/test_xqueue_stub.py | 40 | 6727 | """
Unit tests for stub XQueue implementation.
"""
import mock
import unittest
import json
import requests
import time
import copy
from ..xqueue import StubXQueueService, StubXQueueHandler
class FakeTimer(object):
"""
Fake timer implementation that executes immediately.
"""
def __init__(self, delay, func):
self.func = func
def start(self):
self.func()
class StubXQueueServiceTest(unittest.TestCase):
def setUp(self):
self.server = StubXQueueService()
self.url = "http://127.0.0.1:{0}/xqueue/submit".format(self.server.port)
self.addCleanup(self.server.shutdown)
        # Patch POST requests
patcher = mock.patch('terrain.stubs.xqueue.post')
self.post = patcher.start()
self.addCleanup(patcher.stop)
        # Patch the timer async calls
patcher = mock.patch('terrain.stubs.xqueue.Timer')
timer = patcher.start()
timer.side_effect = FakeTimer
self.addCleanup(patcher.stop)
def test_grade_request(self):
# Post a submission to the stub XQueue
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({
'student_info': 'test',
'grader_payload': 'test',
'student_response': 'test'
})
)
# Check the response we receive
# (Should be the default grading response)
expected_body = json.dumps({'correct': True, 'score': 1, 'msg': '<div></div>'})
self._check_grade_response(callback_url, expected_header, expected_body)
def test_configure_default_response(self):
# Configure the default response for submissions to any queue
response_content = {'test_response': 'test_content'}
self.server.config['default'] = response_content
# Post a submission to the stub XQueue
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({
'student_info': 'test',
'grader_payload': 'test',
'student_response': 'test'
})
)
# Check the response we receive
# (Should be the default grading response)
self._check_grade_response(callback_url, expected_header, json.dumps(response_content))
def test_configure_specific_response(self):
# Configure the XQueue stub response to any submission to the test queue
response_content = {'test_response': 'test_content'}
self.server.config['This is only a test.'] = response_content
# Post a submission to the XQueue stub
callback_url = 'http://127.0.0.1:8000/test_callback'
expected_header = self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({'submission': 'This is only a test.'})
)
# Check that we receive the response we configured
self._check_grade_response(callback_url, expected_header, json.dumps(response_content))
def test_multiple_response_matches(self):
# Configure the XQueue stub with two responses that
# match the same submission
self.server.config['test_1'] = {'response': True}
self.server.config['test_2'] = {'response': False}
with mock.patch('terrain.stubs.http.LOGGER') as logger:
# Post a submission to the XQueue stub
callback_url = 'http://127.0.0.1:8000/test_callback'
self._post_submission(
callback_url, 'test_queuekey', 'test_queue',
json.dumps({'submission': 'test_1 and test_2'})
)
# Expect that we do NOT receive a response
# and that an error message is logged
self.assertFalse(self.post.called)
self.assertTrue(logger.error.called)
def test_register_submission_url(self):
# Configure the XQueue stub to notify another service
# when it receives a submission.
register_url = 'http://127.0.0.1:8000/register_submission'
self.server.config['register_submission_url'] = register_url
callback_url = 'http://127.0.0.1:8000/test_callback'
submission = json.dumps({'grader_payload': 'test payload'})
self._post_submission(callback_url, 'test_queuekey', 'test_queue', submission)
# Check that a notification was sent
self.post.assert_any_call(register_url, data={'grader_payload': u'test payload'})
def _post_submission(self, callback_url, lms_key, queue_name, xqueue_body):
"""
Post a submission to the stub XQueue implementation.
`callback_url` is the URL at which we expect to receive a grade response
`lms_key` is the authentication key sent in the header
`queue_name` is the name of the queue in which to send put the submission
`xqueue_body` is the content of the submission
Returns the header (a string) we send with the submission, which can
be used to validate the response we receive from the stub.
"""
# Post a submission to the XQueue stub
grade_request = {
'xqueue_header': json.dumps({
'lms_callback_url': callback_url,
                'lms_key': lms_key,
                'queue_name': queue_name
}),
'xqueue_body': xqueue_body
}
resp = requests.post(self.url, data=grade_request)
# Expect that the response is success
self.assertEqual(resp.status_code, 200)
# Return back the header, so we can authenticate the response we receive
return grade_request['xqueue_header']
def _check_grade_response(self, callback_url, expected_header, expected_body):
"""
Verify that the stub sent a POST request back to us
with the expected data.
`callback_url` is the URL we expect the stub to POST to
`expected_header` is the header (a string) we expect to receive with the grade.
`expected_body` is the content (a string) we expect to receive with the grade.
Raises an `AssertionError` if the check fails.
"""
# Check the response posted back to us
# This is the default response
expected_callback_dict = {
'xqueue_header': expected_header,
'xqueue_body': expected_body,
}
# Check that the POST request was made with the correct params
self.post.assert_called_with(callback_url, data=expected_callback_dict)
| agpl-3.0 |
scippio/bitcoin | qa/rpc-tests/maxuploadtarget.py | 27 | 10783 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.comptool import wait_until
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (> 1 week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
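# Rough budget arithmetic implied by the constants used in run_test below:
# -maxuploadtarget=200 allows 200*1024*1024 bytes per day, of which
# 144*1000000 bytes are treated as a buffer for serving recent blocks,
# leaving roughly 65 MB/day for old (> 1 week) blocks before peers
# requesting them are disconnected.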
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
        # We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourself after accounting for a fee,
            # and then insert the 128 generated transaction outs in the middle. rawtx[92] is where the
            # number of txouts is stored and is the only thing we overwrite from the original transaction.
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
        # test_nodes[0] will test what happens if we just keep requesting
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * 1000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available / old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
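        # Worked example with the values above: 200*1024*1024 - 144*1000000
        # = 65,715,200 bytes; with blocks near 1 MB (-blockmaxsize=999000)
        # that allows roughly 65-70 old-block requests before the cap is hit.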
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
| mit |
rockneurotiko/django | tests/aggregation_regress/tests.py | 82 | 54019 | from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Q, Avg, Count, Max, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
        # Check that all of the objects are getting counted (allow_nulls) and
        # that values respects the number of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
    def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
1
)
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
4
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
        # A non-ordinal, non-computed aggregate over an annotation correctly
        # inherits the annotation's internal type if the annotation is ordinal
        # or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(name="Jonno's House of Books").annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.b1.id, 'id__count': 2},
{'pub': self.b2.id, 'id__count': 1},
{'pub': self.b3.id, 'id__count': 2},
{'pub': self.b4.id, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
        # Regression for #10182 - Queries with aggregate calls are correctly
        # re-aliased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
        # Check that there is just one GROUP BY expression (zero commas after
        # 'GROUP BY' means at most one grouping column)
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in values(), so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # field in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
        # Test that a field occurring on the LHS of a HAVING clause appears
        # correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
).order_by("num_awards")
self.assertQuerysetEqual(
qs, [
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann"
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # An explicitly provided annotation name poses no problem
        # in this case
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Referencing the auto-generated name in an aggregate() also works.
self.assertEqual(
Author.objects.annotate(Count('book')).aggregate(Max('book__count')),
{'book__count__max': 2}
)
def test_annotate_joins(self):
"""
Test that the base table's join isn't promoted to LOUTER. This could
cause the query generation to fail if there is an exclude() for fk-field
in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
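        # (The base table's entry in alias_map has join_type None; a join
        # promoted by annotate() would show 'LOUTER' here instead.)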
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn('id', group_by[0][0])
self.assertNotIn('name', group_by[0][0])
self.assertNotIn('age', group_by[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('age', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('contact', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
Regression test for #10870: Aggregates with joins ignore extra
filters provided by setup_joins
tests aggregations with generic reverse relations
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book))
ItemTag.objects.create(object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book))
# Assign a tag to model with same PK as the book above. If the JOIN
# used in aggregation doesn't have content type as part of the
# condition the annotation will also count the 'hi mom' tag for b.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk))
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book))
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
Check that an F() object referring to related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
def test_annotate_on_relation(self):
book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 30)
def test_annotate_distinct_aggregate(self):
# There are three books with rating of 4.0 and two of the books have
# the same price. Hence, the distinct removes one rating of 4.0
# from the results.
vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, a join promoted by annotate() is demoted back to INNER when a
        # later filter on that relation requires non-null values.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, its first use by annotate() alone will be LOUTER
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name='t1')
SelfRefFK.objects.create(name='t2', parent=t1)
SelfRefFK.objects.create(name='t3', parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
[('t1', 2), ('t2', 0), ('t3', 0)],
lambda x: (x.name, x.num_children)
)
| bsd-3-clause |
eeshangarg/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_directives/test_decorations.py | 19 | 1847 | #! /usr/bin/env python
# $Id: test_decorations.py 4667 2006-07-12 21:40:56Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for the "header" & "footer" directives.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
totest['headers'] = [
["""\
.. header:: a paragraph for the header
""",
"""\
<document source="test data">
<decoration>
<header>
<paragraph>
a paragraph for the header
"""],
["""\
.. header::
""",
"""\
<document source="test data">
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Content block expected for the "header" directive; none found.
<literal_block xml:space="preserve">
.. header::
"""],
["""\
.. header:: first part of the header
.. header:: second part of the header
""",
"""\
<document source="test data">
<decoration>
<header>
<paragraph>
first part of the header
<paragraph>
second part of the header
"""],
]
totest['footers'] = [
["""\
.. footer:: a paragraph for the footer
""",
"""\
<document source="test data">
<decoration>
<footer>
<paragraph>
a paragraph for the footer
"""],
["""\
.. footer:: even if a footer is declared first
.. header:: the header appears first
""",
"""\
<document source="test data">
<decoration>
<header>
<paragraph>
the header appears first
<footer>
<paragraph>
even if a footer is declared first
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 |
jinglining/flink | flink-python/setup.py | 5 | 12946 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import glob
import io
import os
import platform
import subprocess
import sys
from distutils.command.build_ext import build_ext
from shutil import copytree, copy, rmtree
from setuptools import setup, Extension
if sys.version_info < (3, 5):
print("Python versions prior to 3.5 are not supported for PyFlink.",
file=sys.stderr)
sys.exit(-1)
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def find_file_path(pattern):
files = glob.glob(pattern)
if len(files) < 1:
print("Failed to find the file %s." % pattern)
exit(-1)
if len(files) > 1:
print("The file pattern %s is ambiguous: %s" % (pattern, files))
exit(-1)
return files[0]
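# For example, find_file_path(os.path.join(OPT_PATH, "flink-python_*.jar"))
# below returns the single matching jar path, and exits if the pattern matches
# zero or more than one file.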
# Currently Cython optimizing doesn't support Windows.
if platform.system() == 'Windows':
extensions = ([])
else:
try:
from Cython.Build import cythonize
extensions = cythonize([
Extension(
name="pyflink.fn_execution.fast_coder_impl",
sources=["pyflink/fn_execution/fast_coder_impl.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.fast_operations",
sources=["pyflink/fn_execution/fast_operations.pyx"],
include_dirs=["pyflink/fn_execution/"])
])
except ImportError:
if os.path.exists("pyflink/fn_execution/fast_coder_impl.c"):
extensions = ([
Extension(
name="pyflink.fn_execution.fast_coder_impl",
sources=["pyflink/fn_execution/fast_coder_impl.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.fast_operations",
sources=["pyflink/fn_execution/fast_operations.c"],
include_dirs=["pyflink/fn_execution/"])
])
else:
extensions = ([])
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
LIB_TEMP_PATH = os.path.join(TEMP_PATH, "lib")
OPT_TEMP_PATH = os.path.join(TEMP_PATH, "opt")
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
LICENSES_TEMP_PATH = os.path.join(TEMP_PATH, "licenses")
PLUGINS_TEMP_PATH = os.path.join(TEMP_PATH, "plugins")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
NOTICE_FILE_TEMP_PATH = os.path.join(this_directory, "NOTICE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh"
PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat"
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
# Due to changes in FLINK-14008, the licenses directory and NOTICE file may not exist in
# build-target folder. Just ignore them in this case.
exist_licenses = None
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
incorrect_invocation_message = """
If you are installing pyflink from flink source, you must first build Flink and
run sdist.
To build Flink with maven you can run:
mvn -DskipTests clean package
Building the source dist is done in the flink-python directory:
cd flink-python
python setup.py sdist
pip install dist/*.tar.gz"""
LIB_PATH = os.path.join(FLINK_HOME, "lib")
OPT_PATH = os.path.join(FLINK_HOME, "opt")
OPT_PYTHON_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-python_*.jar")))
OPT_SQL_CLIENT_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-sql-client_*.jar")))
CONF_PATH = os.path.join(FLINK_HOME, "conf")
EXAMPLES_PATH = os.path.join(FLINK_HOME, "examples")
LICENSES_PATH = os.path.join(FLINK_HOME, "licenses")
PLUGINS_PATH = os.path.join(FLINK_HOME, "plugins")
SCRIPTS_PATH = os.path.join(FLINK_HOME, "bin")
LICENSE_FILE_PATH = os.path.join(FLINK_HOME, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_HOME, "README.txt")
exist_licenses = os.path.exists(LICENSES_PATH)
if not os.path.isdir(LIB_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
try:
os.symlink(LIB_PATH, LIB_TEMP_PATH)
support_symlinks = True
except BaseException: # pylint: disable=broad-except
support_symlinks = False
os.mkdir(OPT_TEMP_PATH)
if support_symlinks:
os.symlink(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
os.symlink(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
os.symlink(CONF_PATH, CONF_TEMP_PATH)
os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
os.symlink(PLUGINS_PATH, PLUGINS_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
else:
copytree(LIB_PATH, LIB_TEMP_PATH)
copy(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
copy(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
copytree(CONF_PATH, CONF_TEMP_PATH)
copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
copytree(PLUGINS_PATH, PLUGINS_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
os.mkdir(LOG_TEMP_PATH)
with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
f.write("This file is used to force setuptools to include the log directory. "
"You can delete it at any time after installation.")
# copy the udf runner scripts
copytree(SCRIPTS_PATH, SCRIPTS_TEMP_PATH)
copy(os.path.join(this_directory, "bin", PYFLINK_UDF_RUNNER_SH),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH))
copy(os.path.join(this_directory, "bin", PYFLINK_UDF_RUNNER_BAT),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT))
if exist_licenses and platform.system() != "Windows":
# regenerate the licenses directory and NOTICE file as we only copy part of the
# flink binary distribution.
collect_licenses_file_sh = os.path.abspath(os.path.join(
this_directory, "..", "tools", "releasing", "collect_license_files.sh"))
subprocess.check_output([collect_licenses_file_sh, TEMP_PATH, TEMP_PATH])
# move the NOTICE file to the root of the package
GENERATED_NOTICE_FILE_PATH = os.path.join(TEMP_PATH, "NOTICE")
os.rename(GENERATED_NOTICE_FILE_PATH, NOTICE_FILE_TEMP_PATH)
else:
if not os.path.isdir(LIB_TEMP_PATH) or not os.path.isdir(OPT_TEMP_PATH) \
or not os.path.isdir(SCRIPTS_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
exist_licenses = os.path.exists(LICENSES_TEMP_PATH)
script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
scripts.append("pyflink/find_flink_home.py")
PACKAGES = ['pyflink',
'pyflink.table',
'pyflink.util',
'pyflink.datastream',
'pyflink.dataset',
'pyflink.common',
'pyflink.fn_execution',
'pyflink.metrics',
'pyflink.ml',
'pyflink.ml.api',
'pyflink.ml.api.param',
'pyflink.ml.lib',
'pyflink.ml.lib.param',
'pyflink.lib',
'pyflink.opt',
'pyflink.conf',
'pyflink.log',
'pyflink.examples',
'pyflink.plugins',
'pyflink.bin']
PACKAGE_DIR = {
'pyflink.lib': TEMP_PATH + '/lib',
'pyflink.opt': TEMP_PATH + '/opt',
'pyflink.conf': TEMP_PATH + '/conf',
'pyflink.log': TEMP_PATH + '/log',
'pyflink.examples': TEMP_PATH + '/examples',
'pyflink.plugins': TEMP_PATH + '/plugins',
'pyflink.bin': TEMP_PATH + '/bin'}
PACKAGE_DATA = {
'pyflink': ['README.txt'],
'pyflink.lib': ['*.jar'],
'pyflink.opt': ['*.*', '*/*'],
'pyflink.conf': ['*'],
'pyflink.log': ['*'],
'pyflink.examples': ['*.py', '*/*.py'],
'pyflink.plugins': ['*', '*/*'],
'pyflink.bin': ['*']}
if exist_licenses and platform.system() != "Windows":
PACKAGES.append('pyflink.licenses')
PACKAGE_DIR['pyflink.licenses'] = TEMP_PATH + '/licenses'
PACKAGE_DATA['pyflink.licenses'] = ['*']
setup(
name='apache-flink',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
scripts=scripts,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='[email protected]',
python_requires='>=3.5',
install_requires=['py4j==0.10.8.1', 'python-dateutil==2.8.0', 'apache-beam==2.19.0',
'cloudpickle==1.2.2', 'avro-python3>=1.8.1,<=1.9.1', 'jsonpickle==1.2',
'pandas>=0.23.4,<=0.25.3', 'pyarrow>=0.15.1,<0.16.0', 'pytz>=2018.3'],
cmdclass={'build_ext': build_ext},
tests_require=['pytest==4.4.1'],
description='Apache Flink Python API',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'],
ext_modules=extensions
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(NOTICE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
| apache-2.0 |
rlabrecque/CSteamworks | CSteamworks2.py | 1 | 4593 | #!/usr/bin/env python3
import os
from SteamworksParser import steamworksparser
CPP_HEADER = "// This file is automatically generated!\n\n"
g_SkippedFiles = (
# We don't currently support the following interfaces because they don't provide a factory of their own.
# You are expected to call GetISteamGeneric to get them.
"isteamappticket.h",
"isteamgamecoordinator.h",
# PS3 is not supported.
"isteamps3overlayrenderer.h",
)
g_TypeDict = {
'EHTMLMouseButton': 'ISteamHTMLSurface::EHTMLMouseButton',
'EHTMLKeyModifiers': 'ISteamHTMLSurface::EHTMLKeyModifiers',
}
def main():
entrypoints = []
cppfilenames = []
try:
os.makedirs("wrapper/")
except OSError:
pass
with open("wrapper/steam_api_c.h", "w") as header:
header.write(CPP_HEADER)
header.write("#pragma once\n\n")
header.write("#include \"CSteamworks.h\"\n\n")
#steamworksparser.Settings.warn_utf8bom = True
#steamworksparser.Settings.warn_includeguardname = True
#steamworksparser.Settings.print_unuseddefines = True
steamworksparser.Settings.fake_gameserver_interfaces = True
parser = steamworksparser.parse("steam/")
for f in parser.files:
if f.name in g_SkippedFiles:
continue
print("File: " + f.name)
if not f.interfaces:
continue
with open("wrapper/" + os.path.splitext(f.name)[0] + ".cpp", "w") as out, open ("wrapper/steam_api_c.h", "a") as header:
out.write(CPP_HEADER)
cppfilenames.append(os.path.splitext(f.name)[0] + ".cpp")
lastIfBlock = None
for i in f.interfaces:
print(" - " + i.name)
for func in i.functions:
if lastIfBlock is not None and lastIfBlock != func.ifstatements:
out.write("#endif\n")
lastIfBlock = None
if func.ifstatements and func.ifstatements != lastIfBlock:
out.write("#if " + func.ifstatements + "\n")
lastIfBlock = func.ifstatements
if func.private:
continue
def create_arg_string(arg):
argtype = g_TypeDict.get(arg.type, arg.type)
space = "" if argtype.endswith("*") and " " in argtype else " "
defaultarg = "" if not arg.default else " = " + arg.default
return argtype + space + arg.name + defaultarg
args = ", ".join([create_arg_string(arg) for arg in func.args])
argnames = ", ".join([arg.name for arg in func.args])
'''for arg in func.args:
argtype = g_TypeDict.get(arg.type, arg.type)
spaceHACK = " "
if argtype.endswith("*") and " " in argtype:
spaceHACK = "" # TODO: REMOVE THIS HACK
args += argtype + spaceHACK + arg.name
if arg.default:
args += " = " + arg.default
args += ", "
args = args[:-2]'''
returntype = func.returntype
ConvertToUint64 = ""
if returntype == "CSteamID":
returntype = "SteamID_t"
ConvertToUint64 = ".ConvertToUint64()"
# If a function overrides another with the same name but different args, we fix it up here so that it can be properly exposed.
strEntryPoint = i.name + "_" + func.name
if strEntryPoint in entrypoints:
strEntryPoint += "_"
entrypoints.append(strEntryPoint)
declaration = "SB_API " + returntype + " S_CALLTYPE " + strEntryPoint + "(" + args + ")"
header.write(declaration + ";\n")
out.write(declaration + " {\n")
out.write("\treturn " + i.name[1:] + "()->" + func.name + "(" + argnames + ")" + ConvertToUint64 + ";\n")
out.write("}\n")
out.write("\n")
if lastIfBlock:
out.write("#endif\n")
if cppfilenames:
with open("wrapper/unitybuild.cpp", "w") as out:
out.write(CPP_HEADER)
for filename in cppfilenames:
out.write("#include \"" + filename + "\"\n")
if __name__ == "__main__":
main()
| mit |
sahiljain/catapult | third_party/gsutil/third_party/apitools/apitools/base/py/stream_slice_test.py | 23 | 1677 | """Tests for stream_slice."""
import string
import six
import unittest2
from apitools.base.py import exceptions
from apitools.base.py import stream_slice
class StreamSliceTest(unittest2.TestCase):
def setUp(self):
self.stream = six.StringIO(string.ascii_letters)
self.value = self.stream.getvalue()
self.stream.seek(0)
def testSimpleSlice(self):
ss = stream_slice.StreamSlice(self.stream, 10)
self.assertEqual('', ss.read(0))
self.assertEqual(self.value[0:3], ss.read(3))
self.assertIn('7/10', str(ss))
self.assertEqual(self.value[3:10], ss.read())
self.assertEqual('', ss.read())
self.assertEqual('', ss.read(10))
self.assertEqual(10, self.stream.tell())
def testEmptySlice(self):
ss = stream_slice.StreamSlice(self.stream, 0)
self.assertEqual('', ss.read(5))
self.assertEqual('', ss.read())
self.assertEqual(0, self.stream.tell())
def testOffsetStream(self):
self.stream.seek(26)
ss = stream_slice.StreamSlice(self.stream, 26)
self.assertEqual(self.value[26:36], ss.read(10))
self.assertEqual(self.value[36:], ss.read())
self.assertEqual('', ss.read())
def testTooShortStream(self):
ss = stream_slice.StreamSlice(self.stream, 1000)
self.assertEqual(self.value, ss.read())
self.assertEqual('', ss.read(0))
with self.assertRaises(exceptions.StreamExhausted) as e:
ss.read()
with self.assertRaises(exceptions.StreamExhausted) as e:
ss.read(10)
self.assertIn('exhausted after %d' % len(self.value), str(e.exception))
| bsd-3-clause |
kiran/bart-sign | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
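#
# A rough sketch of that wiring (see sbcsgroupprober.py in this package for
# the authoritative version):
#   hebrew_prober = HebrewProber()
#   logical_prober = SBCharSetProber(Win1255HebrewModel, False, hebrew_prober)
#   visual_prober = SBCharSetProber(Win1255HebrewModel, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)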
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
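        # For example, a logical-Hebrew word ending in final Mem (FINAL_MEM,
        # 0xed) places that letter just before a space and scores case (1);
        # the same word stored visually is spelled backwards, so the final
        # letter follows a space at the start of the word and scores case (3).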
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
| mit |
rhnvrm/iot-hackerearth | py/sim/wo/work.py | 1 | 5919 | from __future__ import division
import requests
import random
import time
import threading
import rethinkdb as r
import math
import numpy as np
import cv2
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from scipy.misc import imread
import os
plt.scatter([0,5],[0,5])
plt.ion()
plt.show()
plt.clf()
plt.gca().grid(1)
img = imread("im1.png")
# Coordinates of the Beacons
x1=0.5
y1=0
x2=0.5
y2=2.5
x3=2.5
y3=2.5
# End Segment
maximum =10
#init fences
FENCES = []
for i in xrange(0,3):
for j in xrange(0,3):
FENCES+=[[[i,j],[i,j+1],[i+1,j+1],[i+1,j]]]
def kmeans(Z,STO):  # Z should be an np array of [x, y] points, like the example described below
#Z = np.array([[a1,b1],[x1,y1],[x2,y2],[a3,b3],[a2,b2]])
# convert to np.float32
plt.clf()
plt.imshow(img, zorder=0, extent=[-1,4,-6,3.5])
Z = np.float32(Z)
# define criteria and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret,label,center=cv2.kmeans(Z,1,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
# pls add corresponding entries for each cluster
# Now separate the data, Note the flatten()
A = Z[label.ravel()==0]
B = Z[label.ravel()==1]
# Plot the data
#"""
	#remove for debug
plt.scatter(A[:,0],A[:,1])
plt.scatter(B[:,0],B[:,1],c = 'r')
plt.scatter([x1,x2,x3],[y1,y2,y3],s = 40, c = 'red')
plt.scatter(center[:,0],center[:,1],s = 80,c = 'y', marker = 's')
plt.xlabel('X'),plt.ylabel('Y')
plotfences(plt)
plt.draw()
plt.savefig("plot.png")
pt = [center[:,0],center[:,1]]
postdata(checkfences(pt),pt[0],pt[1])
STO = center
def Intersects(x1,y1,x2,y2,x3,y3,r1,r2,r3):
	#Circle 1: r1^2 = x^2 + y^2
#Circle 2: r2^2 = (x - a)^2 + (y - b)^2
a = x2 - x1;
b = y2 - y1;
d = math.sqrt(a*a + b*b);
if (r1 + r2 <= d):
sx1,sy1,sx2,sy2 = ((r2*x1+r1*x2)/(r1+r2),((r2*y1+r1*y2)/(r1+r2)),(r2*x1+r1*x2)/(r1+r2),((r2*y1+r1*y2)/(r1+r2)))
elif ((d <= abs( r1 - r2 )) and (r1>r2)):
sx1,sy1,sx2,sy2 = ((r1*x2-r2*x1)/(r1-r2),((r1*y2-r2*y1)/(r1-r2)),(r1*x2-r2*x1)/(r1-r2),((r1*y2-r2*y1)/(r1-r2)))
elif ((d <= abs( r1 - r2 )) and (r2>r1)):
sx1,sy1,sx2,sy2 = ((r2*x1-r1*x2)/(r2-r1),((r2*y1-r1*y2)/(r2-r1)),(r2*x1-r1*x2)/(r2-r1),((r2*y1-r1*y2)/(r2-r1)))
else:
t = math.sqrt( (d + r1 + r2) * (d + r1 - r2) * (d - r1 + r2) * (-d + r1 + r2) )
sx1 = 0.5 * (a + (a*(r1*r1 - r2*r2) + b*t)/(d**2))
sx2 = 0.5 * (a + (a*(r1*r1 - r2*r2) - b*t)/(d**2))
sy1 = 0.5 * (b + (b*(r1*r1 - r2*r2) - a*t)/(d**2))
sy2 = 0.5 * (b + (b*(r1*r1 - r2*r2) + a*t)/(d**2))
sx1 = sx1 + x1
sy1 = sy1 + y1
sx2 = sx2 + x1
sy2 = sy2 + y1
	# keep the intersection point whose distance to the 3rd beacon has the smaller absolute error vs r3
if (abs((((sx1-x3)**2 +(sy1-y3)**2)**0.5)-r3)>=abs((((sx2-x3)**2 +(sy2-y3)**2)**0.5)-r3)):
return [[sx2,sy2]]
else:
return [[sx1,sy1]]
"""
#append the following to the storing array that passes to the kmeans clustering
print "x1 = %f" %sx1
print "y1 = %f" %sy1
print "x2 = %f" %sx2
print "y2 = %f" %sy2
#[sx1,sy1,sx2,sy2]
return [[sx1,sy1],[sx2,sy2]]
"""
def display_data(distances):
# hardcoded beacon data Start
#r1,r2,r3 = tuple(distances[i] for i in distances)
r1 = distance_lookup_table["B4:99:4C:66:4B:38"]
r2 = distance_lookup_table["B4:99:4C:66:5A:26"]
r3 = distance_lookup_table["B4:99:4C:66:2C:58"]
if(r1 < 0 or r2 < 0 or r3 < 0): return -1
print(r1,r2,r3)
# Hardcoded beacon data end
	# END OF FUNCTION DECLARATIONS
alive = 3
nclusters = 2
PTS = []
#taking groups of 3
for i in range(0,alive-2):
for j in range(i+1,alive-1):
for k in range(j+1,alive):
PTS = PTS + Intersects(x1,y1,x2,y2,x3,y3,r1,r2,r3)
#print i,j
PTS = PTS + Intersects(x1,y1,x3,y3,x2,y2,r1,r3,r2)
#print j,k
PTS = PTS + Intersects(x2,y2,x3,y3,x1,y1,r2,r3,r1)
#print k,i
#"""
STO = []
CentreWeight = 10000000000000
#print STO
#print PTS
kmeans(np.array(PTS),np.array(STO))
def checkfences(pt): # ref to global variable FENCES
for i in range(0,len(FENCES)):
if(infence(FENCES[i],pt)):
return i
return -1
def getDistance(rssi, txPower):
return pow(10, ( txPower - rssi) / (10 * ATTN))
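# Worked example of the log-distance model above: with ATTN = 2 and a beacon
# whose txPower is -59 dBm (typically the RSSI measured at 1 m), a reading of
# rssi = -79 dBm gives 10 ** ((-59 - (-79)) / 20.0) = 10 ** 1 = ~10 metres.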
"""
fence1 = [[0,0],[0,1],[1,1],[1,0]] # define points in the order of loop
point1 = [1.05,1]
point2 = [2,2]
infence(fence1,point1)
"""
def infence(fence,pt):
bbPath = matplotlib.path.Path(np.array(fence))
return bbPath.contains_point((pt[0], pt[1]))
def plotfences(media):
for i in range(0,len(FENCES)):
rectangle = media.Rectangle((FENCES[i][0][0],FENCES[i][0][1]), 1, 1, fc='None')
media.gca().add_patch(rectangle)
def postdata(segment,x,y):
r=requests.post("http://0.0.0.0:8521/position", data = {"segment":segment ,"x":x, "y":y})
# main Begins Here
conn = r.connect( "0.0.0.0", 28015 , db='heck')
ATTN = 2
power_to_A_lookup_table = {"B4:99:4C:66:4B:38": -58, "B4:99:4C:66:5A:26": -62, "B4:99:4C:66:2C:58": -62}
distance_lookup_table = {"B4:99:4C:66:4B:38": -1, "B4:99:4C:66:5A:26": -1, "B4:99:4C:66:2C:58": -1}
#old_distance_lookup_table = {"B4:99:4C:57:AE:E3": -1, "B4:99:4C:57:D2:AA": -1, "B4:99:4C:57:EC:C6": -1}
#x = {u'old_val': {u'uid': u'B4:99:4C:57:EC:C6', u'rssi': -61, u'name': u'Bluetooth Device', u'timestamp': 1453011839.46865}, u'new_val': {u'uid': u'B4:99:4C:57:EC:C6', u'rssi': -55, u'name': u'Bluetooth Device', u'timestamp': 1453011857.281005}}
feed = r.table('beacons').changes().run(conn)
for change in feed:
if change['new_val']['uid'] in power_to_A_lookup_table:
#old_distance_lookup_table = distance_lookup_table
distance_lookup_table[change['new_val']['uid']] = getDistance(int(change['new_val']['rssi']), power_to_A_lookup_table[change['new_val']['uid']])
#for i in distance_lookup_table:
#print "here\n"
# print str(i) + " -> " + str(distance_lookup_table[i])
#t=threading.Thread(target=print_distance_lookup_table)
#d=threading.Thread(target=display_data)
display_data(distance_lookup_table)
#d.daemon = True
#t.daemon = True
#t.start()
#d.start()
| mit |
40223226/2015cdbg8 | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/process.py | 694 | 2304 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#for brython
from _multiprocessing import Process
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
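# For example, in the parent interpreter current_process() returns the
# module-level _MainProcess instance created below (its _name is
# 'MainProcess'), and active_children() prunes finished children via
# _cleanup() before returning the survivors.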
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
# brython note: class Process is defined in /usr/libs/_multiprocessing.js
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
# For debug and leak testing
_dangling = WeakSet()
| gpl-3.0 |
jymannob/CouchPotatoServer | libs/rtorrent/tracker.py | 173 | 5212 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# from rtorrent.rpc import Method
import rtorrent.rpc
from rtorrent.common import safe_repr
Method = rtorrent.rpc.Method
class Tracker:
"""Represents an individual tracker within a L{Torrent} instance."""
def __init__(self, _rt_obj, info_hash, **kwargs):
self._rt_obj = _rt_obj
self.info_hash = info_hash # : info hash for the torrent using this tracker
for k in kwargs.keys():
setattr(self, k, kwargs.get(k, None))
# for clarity's sake...
self.index = self.group # : position of tracker within the torrent's tracker list
self.rpc_id = "{0}:t{1}".format(
self.info_hash, self.index) # : unique id to pass to rTorrent
def __repr__(self):
return safe_repr("Tracker(index={0}, url=\"{1}\")",
self.index, self.url)
def enable(self):
"""Alias for set_enabled("yes")"""
self.set_enabled("yes")
def disable(self):
"""Alias for set_enabled("no")"""
self.set_enabled("no")
def update(self):
"""Refresh tracker data
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self._rt_obj)]
for method in retriever_methods:
multicall.add(method, self.rpc_id)
multicall.call()
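# For example, a typical sequence on a Tracker obtained from its parent Torrent
# is tracker.update() followed by the generated getters declared below, such as
# tracker.get_url() or tracker.is_enabled(); per update()'s docstring the
# retrieved values are also stored as attributes on the instance.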
methods = [
# RETRIEVERS
Method(Tracker, 'is_enabled', 't.is_enabled', boolean=True),
Method(Tracker, 'get_id', 't.get_id'),
Method(Tracker, 'get_scrape_incomplete', 't.get_scrape_incomplete'),
Method(Tracker, 'is_open', 't.is_open', boolean=True),
Method(Tracker, 'get_min_interval', 't.get_min_interval'),
Method(Tracker, 'get_scrape_downloaded', 't.get_scrape_downloaded'),
Method(Tracker, 'get_group', 't.get_group'),
Method(Tracker, 'get_scrape_time_last', 't.get_scrape_time_last'),
Method(Tracker, 'get_type', 't.get_type'),
Method(Tracker, 'get_normal_interval', 't.get_normal_interval'),
Method(Tracker, 'get_url', 't.get_url'),
Method(Tracker, 'get_scrape_complete', 't.get_scrape_complete',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_last', 't.activity_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_next', 't.activity_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_last', 't.failed_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_next', 't.failed_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_last', 't.success_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_next', 't.success_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'can_scrape', 't.can_scrape',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'get_failed_counter', 't.failed_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_scrape_counter', 't.scrape_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_success_counter', 't.success_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'is_usable', 't.is_usable',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_busy', 't.is_busy',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_extra_tracker', 't.is_extra_tracker',
min_version=(0, 9, 1),
boolean=True,
),
Method(Tracker, "get_latest_sum_peers", "t.latest_sum_peers",
min_version=(0, 9, 0)
),
Method(Tracker, "get_latest_new_peers", "t.latest_new_peers",
min_version=(0, 9, 0)
),
# MODIFIERS
Method(Tracker, 'set_enabled', 't.set_enabled'),
]
| gpl-3.0 |
xzturn/tensorflow | tensorflow/python/autograph/converters/function_scopes.py | 3 | 4844 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wraps the body of a converted function with auxiliary constructs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import annos
class _Function(object):
def __init__(self):
self.context_name = None
class FunctionBodyTransformer(converter.Base):
"""Wraps function bodies around autograph-specific boilerplate."""
def visit_Return(self, node):
if node.value is None:
return node
return templates.replace(
'return function_context_name.mark_return_value(value)',
function_context_name=self.state[_Function].context_name,
value=node.value)
def _function_scope_options(self):
"""Returns the options with which to create function scopes."""
# Top-level function receive the options that were directly requested.
# All others receive the options corresponding to a recursive conversion.
# Note: this mainly controls the user_requested flag, which is important
# primarily because the FunctionScope context also creates a
# ControlStatusCtx(autograph=ENABLED) when user_requested is True. See
# function_wrappers.py.
if self.state[_Function].level == 2:
return self.ctx.program.options
return self.ctx.program.options.call_options()
def visit_Lambda(self, node):
self.state[_Function].enter()
node = self.generic_visit(node)
# Only wrap the top-level function. Theoretically, we can and should wrap
# everything, but that can lead to excessive boilerplate when lambdas are
# nested.
    # TODO(mdan): Look more closely for use cases that actually require this.
if self.state[_Function].level > 2:
self.state[_Function].exit()
return node
scope = anno.getanno(node, anno.Static.SCOPE)
function_context_name = self.ctx.namer.new_symbol('lscope',
scope.referenced)
self.state[_Function].context_name = function_context_name
anno.setanno(node, 'function_context_name', function_context_name)
template = """
ag__.with_function_scope(
lambda function_context: body, function_context_name, options)
"""
node.body = templates.replace_as_expression(
template,
options=self._function_scope_options().to_ast(),
function_context=function_context_name,
function_context_name=gast.Constant(function_context_name, kind=None),
body=node.body)
self.state[_Function].exit()
return node
def visit_FunctionDef(self, node):
self.state[_Function].enter()
scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
function_context_name = self.ctx.namer.new_symbol('fscope',
scope.referenced)
self.state[_Function].context_name = function_context_name
anno.setanno(node, 'function_context_name', function_context_name)
node = self.generic_visit(node)
docstring_node = None
if node.body:
first_statement = node.body[0]
if (isinstance(first_statement, gast.Expr) and
isinstance(first_statement.value, gast.Constant)):
docstring_node = first_statement
node.body = node.body[1:]
template = """
with ag__.FunctionScope(
function_name, context_name, options) as function_context:
body
"""
wrapped_body = templates.replace(
template,
function_name=gast.Constant(node.name, kind=None),
context_name=gast.Constant(function_context_name, kind=None),
options=self._function_scope_options().to_ast(),
function_context=function_context_name,
body=node.body)
if docstring_node is not None:
wrapped_body = [docstring_node] + wrapped_body
node.body = wrapped_body
self.state[_Function].exit()
return node
def transform(node, ctx):
return FunctionBodyTransformer(ctx).visit(node)
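# Illustrative sketch (an editor's addition, not part of the original module):
# roughly what the FunctionDef wrapping above produces. Given a converted
# function such as
#
#   def f(x):
#     """Docstring."""
#     return x + 1
#
# the transformer rewrites it along the lines of
#
#   def f(x):
#     """Docstring."""
#     with ag__.FunctionScope('f', 'fscope', options) as fscope:
#       return fscope.mark_return_value(x + 1)
#
# where 'fscope' stands for the fresh symbol produced by ctx.namer.new_symbol
# and `options` stands for the AST built by _function_scope_options().to_ast();
# the exact generated names here are assumptions, not literal output.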
| apache-2.0 |
aronparsons/spacewalk | backend/server/rhnServer/satellite_cert.py | 2 | 7227 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# This module exposes the SatelliteCert class, used for parsing a satellite
# certificate
#
import string
import sys
from xml.dom.minidom import parseString
from xml.sax import SAXParseException
class ParseException(Exception):
pass
# Generic class to represent items (like channel families)
class Item:
# Name to be displayed by repr()
pretty_name = None
# Attribute name in the parent class
attribute_name = None
# Mapping from XML to local storage
attributes = {}
def __init__(self, node=None):
if not node:
return
for attr_name, storage_name in self.attributes.items():
attr = node.getAttribute(attr_name)
# Make sure we stringify the attribute - it may get out as unicode
setattr(self, storage_name, attr)
def __repr__(self):
return "<%s; %s>" % (self.pretty_name,
string.join(
map(lambda x, s=self: '%s="%s"' % (x, getattr(s, x)),
self.attributes.values()),
', '
)
)
class ChannelFamily(Item):
pretty_name = "channel family"
attribute_name = 'channel_families'
attributes = {'family': 'name', 'quantity': 'quantity', 'flex': 'flex'}
class Slots:
_db_label = None
_slot_name = None
def __init__(self, quantity):
self.quantity = quantity
def get_quantity(self):
return self.quantity
def get_db_label(self):
"Returns the label of this type of slot in the database"
return self._db_label
def get_slot_name(self):
"""Returns the name of the slot, used by
rhn_entitlements.modify_org_service"""
return self._slot_name
class ManagementSlots(Slots):
_db_label = 'enterprise_entitled'
_slot_name = 'enterprise'
class ProvisioningSlots(Slots):
_db_label = 'provisioning_entitled'
_slot_name = 'provisioning'
# Slots for virt entitlements support
class VirtualizationSlots(Slots):
_db_label = 'virtualization_host'
_slot_name = 'virtualization'
class VirtualizationPlatformSlots(Slots):
_db_label = 'virtualization_host_platform'
_slot_name = 'virtualization_platform'
# NonLinux slots are gone - misa 20050527
class MonitoringSlots(Slots):
_db_label = 'monitoring_entitled'
_slot_name = 'monitoring'
class SatelliteCert:
"""Satellite certificate class
Usage:
c = SatelliteCert()
c.load('<rhn-cert><rhn-cert-field name="owner">John Doe</rhn-cert-field></rhn-cert>')
print c.owner
"""
fields_scalar = ['product', 'owner', 'issued', 'expires', 'slots',
'provisioning-slots', 'nonlinux-slots',
'monitoring-slots', 'virtualization_host',
'virtualization_host_platform', 'satellite-version',
'generation', ]
fields_list = {'channel-families': ChannelFamily}
# datesFormat_cert = '%a %b %d %H:%M:%S %Y' ## OLD CERT FORMAT
datesFormat_cert = '%Y-%m-%d %H:%M:%S'
datesFormat_db = '%Y-%m-%d %H:%M:%S'
def __init__(self):
for f in self.fields_scalar:
setattr(self, f, None)
for f in self.fields_list.values():
setattr(self, f.attribute_name, [])
self.signature = None
self._slots = {}
def load(self, s):
try:
self._load(s)
except SAXParseException:
raise ParseException, None, sys.exc_info()[2]
# Now represent the slots in a more meaningful way
self._slots.clear()
for slot_name, (slot_attr, factory) in self._slot_maps.items():
quantity = getattr(self, slot_attr)
self._slots[slot_name] = factory(quantity)
return self
def _load(self, s):
dom_element = parseString(s)
certs = dom_element.getElementsByTagName("rhn-cert")
if not certs:
self._root = None
else:
self._root = certs[0]
for child in self._root.childNodes:
if child.nodeType != child.ELEMENT_NODE:
# Probably white space
continue
if child.nodeName == 'rhn-cert-field':
field_name = child.getAttribute("name")
if not field_name:
# XXX Bogus
continue
if field_name in self.fields_scalar:
val = get_text(child)
if not val:
continue
setattr(self, field_name, val)
continue
if self.fields_list.has_key(field_name):
val = self.fields_list[field_name](child)
l = getattr(self, val.attribute_name)
l.append(val)
elif child.nodeName == 'rhn-cert-signature':
self.signature = get_text(child)
# Python's docs say: When you are finished with a DOM, you should
# clean it up. This is necessary because some versions of Python do
# not support garbage collection of objects that refer to each other
# in a cycle. Until this restriction is removed from all versions of
# Python, it is safest to write your code as if cycles would not be
# cleaned up.
dom_element.unlink()
_slot_maps = {
'management': ('slots', ManagementSlots),
'provisioning': ('provisioning-slots', ProvisioningSlots),
'monitoring': ('monitoring-slots', MonitoringSlots),
'virtualization': ('virtualization_host', VirtualizationSlots),
'virtualization_platform': ('virtualization_host_platform', VirtualizationPlatformSlots)
}
def get_slots(self, slot_type):
if not self._slots.has_key(slot_type):
raise AttributeError(slot_type)
return self._slots[slot_type]
def get_slot_types(self):
return self._slot_maps.keys()
def lookup_slot_by_db_label(self, db_label):
# Given a string like 'sw_mgr_entitled', returns a string 'management'
for label, (slot_name, slot_class) in self._slot_maps.items():
if slot_class._db_label == db_label:
return label
return None
def get_text(node):
return string.join(
map(lambda x: x.data,
filter(lambda x: x.nodeType == x.TEXT_NODE, node.childNodes)
), "")
if __name__ == '__main__':
c = SatelliteCert()
c.load(open("cert").read())
print c.issued
| gpl-2.0 |
CENDARI/scrapy | tests/test_log.py | 15 | 5670 | from io import BytesIO
from twisted.python import log as txlog, failure
from twisted.trial import unittest
from scrapy import log
from scrapy.spider import Spider
from scrapy.settings import default_settings
from scrapy.utils.test import get_crawler
class LogTest(unittest.TestCase):
def test_get_log_level(self):
default_log_level = getattr(log, default_settings.LOG_LEVEL)
self.assertEqual(log._get_log_level('WARNING'), log.WARNING)
self.assertEqual(log._get_log_level(log.WARNING), log.WARNING)
self.assertRaises(ValueError, log._get_log_level, object())
class ScrapyFileLogObserverTest(unittest.TestCase):
level = log.INFO
encoding = 'utf-8'
def setUp(self):
self.f = BytesIO()
self.log_observer = log.ScrapyFileLogObserver(self.f, self.level, self.encoding)
self.log_observer.start()
def tearDown(self):
self.flushLoggedErrors()
self.log_observer.stop()
def logged(self):
return self.f.getvalue().strip()[25:]
def first_log_line(self):
logged = self.logged()
return logged.splitlines()[0] if logged else ''
def test_msg_basic(self):
log.msg("Hello")
self.assertEqual(self.logged(), "[scrapy] INFO: Hello")
def test_msg_ignore_spider(self):
spider = Spider("myspider")
log.msg("Hello", spider=spider)
self.failIf(self.logged())
def test_msg_level1(self):
log.msg("Hello", level=log.WARNING)
self.assertEqual(self.logged(), "[scrapy] WARNING: Hello")
def test_msg_level2(self):
log.msg("Hello", log.WARNING)
self.assertEqual(self.logged(), "[scrapy] WARNING: Hello")
def test_msg_wrong_level(self):
log.msg("Hello", level=9999)
self.assertEqual(self.logged(), "[scrapy] NOLEVEL: Hello")
def test_msg_encoding(self):
log.msg(u"Price: \xa3100")
self.assertEqual(self.logged(), "[scrapy] INFO: Price: \xc2\xa3100")
def test_msg_ignore_level(self):
log.msg("Hello", level=log.DEBUG)
log.msg("World", level=log.INFO)
self.assertEqual(self.logged(), "[scrapy] INFO: World")
def test_msg_ignore_system(self):
txlog.msg("Hello")
self.failIf(self.logged())
def test_msg_ignore_system_err(self):
txlog.msg("Hello")
self.failIf(self.logged())
def test_err_noargs(self):
try:
a = 1/0
except:
log.err()
self.assertIn('Traceback', self.logged())
self.assertIn('ZeroDivisionError', self.logged())
def test_err_why(self):
log.err(TypeError("bad type"), "Wrong type")
self.assertEqual(self.first_log_line(), "[scrapy] ERROR: Wrong type")
self.assertIn('TypeError', self.logged())
self.assertIn('bad type', self.logged())
def test_error_outside_scrapy(self):
"""Scrapy logger should still print outside errors"""
txlog.err(TypeError("bad type"), "Wrong type")
self.assertEqual(self.first_log_line(), "[-] ERROR: Wrong type")
self.assertIn('TypeError', self.logged())
self.assertIn('bad type', self.logged())
# this test fails in twisted trial observer, not in scrapy observer
# def test_err_why_encoding(self):
# log.err(TypeError("bad type"), u"\xa3")
# self.assertEqual(self.first_log_line(), "[scrapy] ERROR: \xc2\xa3")
def test_err_exc(self):
log.err(TypeError("bad type"))
self.assertIn('Unhandled Error', self.logged())
self.assertIn('TypeError', self.logged())
self.assertIn('bad type', self.logged())
def test_err_failure(self):
log.err(failure.Failure(TypeError("bad type")))
self.assertIn('Unhandled Error', self.logged())
self.assertIn('TypeError', self.logged())
self.assertIn('bad type', self.logged())
class Latin1ScrapyFileLogObserverTest(ScrapyFileLogObserverTest):
encoding = 'latin-1'
def test_msg_encoding(self):
log.msg(u"Price: \xa3100")
logged = self.f.getvalue().strip()[25:]
self.assertEqual(self.logged(), "[scrapy] INFO: Price: \xa3100")
# this test fails in twisted trial observer, not in scrapy observer
# def test_err_why_encoding(self):
# log.err(TypeError("bad type"), u"\xa3")
# self.assertEqual(self.first_log_line(), "[scrapy] ERROR: \xa3")
class CrawlerScrapyFileLogObserverTest(unittest.TestCase):
def setUp(self):
self.f = BytesIO()
self.crawler = get_crawler(Spider)
self.spider = self.crawler.spider = self.crawler._create_spider('test')
self.log_observer = log.ScrapyFileLogObserver(self.f, log.INFO,
'utf-8', self.crawler)
self.log_observer.start()
def tearDown(self):
self.flushLoggedErrors()
self.log_observer.stop()
def logged(self):
return self.f.getvalue().strip()[25:]
def test_msg_basic(self):
log.msg("Hello", spider=self.spider)
self.assertEqual(self.logged(), "[test] INFO: Hello")
def test_msg_ignore_scrapy_channel(self):
log.msg("Hello")
self.failIf(self.logged())
def test_msg_ignore_another_crawler(self):
crawler = get_crawler(Spider)
log.msg("Hello", spider=crawler._create_spider('test'))
self.failIf(self.logged())
def test_msg_stats_log(self):
assert self.crawler.stats.get_value('log_count/INFO', 0) == 0
log.msg("Hello", spider=self.spider)
self.assertEqual(self.crawler.stats.get_value('log_count/INFO'), 1)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Anonymouslemming/ansible | lib/ansible/plugins/action/iosxr_config.py | 46 | 4184 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.iosxr import ActionModule as _ActionModule
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(to_bytes(contents))
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
peihe/incubator-beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset_main.py | 18 | 2269 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
This example has in the juliaset/ folder all the code needed to execute the
workflow. It is organized in this way so that it can be packaged as a Python
package and later installed in the VM workers executing the job. The root
directory for the example contains just a "driver" script to launch the job
and the setup.py file needed to create a package.
The advantage of organizing the code this way is that large projects naturally
evolve beyond just one module, and packaging ensures the additional modules are
present on the workers executing the job.
In Python Dataflow, using the --setup_file option when submitting a job will
trigger creating a source distribution (as if running python setup.py sdist) and
then staging the resulting tarball in the staging area. The workers, upon
startup, will install the tarball.
Below is a complete command line for running the juliaset workflow remotely as
an example:
python juliaset_main.py \
--job_name juliaset-$USER \
--project YOUR-PROJECT \
--runner DataflowRunner \
--setup_file ./setup.py \
--staging_location gs://YOUR-BUCKET/juliaset/staging \
--temp_location gs://YOUR-BUCKET/juliaset/temp \
--coordinate_output gs://YOUR-BUCKET/juliaset/out \
--grid_size 20 \
"""
import logging
from apache_beam.examples.complete.juliaset.juliaset import juliaset
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
juliaset.run()
| apache-2.0 |
romanz/python-trezor | trezorlib/tests/device_tests/test_msg_lisk_getaddress.py | 3 | 1383 | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from trezorlib import lisk
from trezorlib.tools import parse_path
from .common import TrezorTest
LISK_PATH = parse_path("m/44h/134h/0h/1h")
@pytest.mark.lisk
class TestMsgLiskGetaddress(TrezorTest):
def test_lisk_getaddress(self):
self.setup_mnemonic_nopin_nopassphrase()
assert lisk.get_address(self.client, LISK_PATH[:2]) == "1431530009238518937L"
assert lisk.get_address(self.client, LISK_PATH[:3]) == "17563781916205589679L"
assert lisk.get_address(self.client, LISK_PATH) == "1874186517773691964L"
assert (
lisk.get_address(self.client, parse_path("m/44h/134h/999h/999h"))
== "16295203558710684671L"
)
| lgpl-3.0 |
munyirik/python | cpython/Lib/asyncio/queues.py | 3 | 8955 | """Queues"""
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
import collections
import heapq
from . import events
from . import futures
from . import locks
from .tasks import coroutine
class QueueEmpty(Exception):
"""Exception raised when Queue.get_nowait() is called on a Queue object
which is empty.
"""
pass
class QueueFull(Exception):
"""Exception raised when the Queue.put_nowait() method is called on a Queue
object which is full.
"""
pass
class Queue:
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "yield from put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
# Futures.
self._getters = collections.deque()
# Pairs of (item, Future).
self._putters = collections.deque()
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()
self._init(maxsize)
# These three are overridable in subclasses.
def _init(self, maxsize):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def __put_internal(self, item):
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()
def __repr__(self):
return '<{} at {:#x} {}>'.format(
type(self).__name__, id(self), self._format())
def __str__(self):
return '<{} {}>'.format(type(self).__name__, self._format())
def _format(self):
result = 'maxsize={!r}'.format(self._maxsize)
if getattr(self, '_queue', None):
result += ' _queue={!r}'.format(list(self._queue))
if self._getters:
result += ' _getters[{}]'.format(len(self._getters))
if self._putters:
result += ' _putters[{}]'.format(len(self._putters))
if self._unfinished_tasks:
result += ' tasks={}'.format(self._unfinished_tasks)
return result
def _consume_done_getters(self):
# Delete waiters at the head of the get() queue who've timed out.
while self._getters and self._getters[0].done():
self._getters.popleft()
def _consume_done_putters(self):
# Delete waiters at the head of the put() queue who've timed out.
while self._putters and self._putters[0][1].done():
self._putters.popleft()
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return not self._queue
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
@coroutine
def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
This method is a coroutine.
"""
self._consume_done_getters()
if self._getters:
assert not self._queue, (
'queue non-empty, why are getters waiting?')
getter = self._getters.popleft()
self.__put_internal(item)
# getter cannot be cancelled, we just removed done getters
getter.set_result(self._get())
elif self._maxsize > 0 and self._maxsize <= self.qsize():
waiter = futures.Future(loop=self._loop)
self._putters.append((item, waiter))
yield from waiter
else:
self.__put_internal(item)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
self._consume_done_getters()
if self._getters:
assert not self._queue, (
'queue non-empty, why are getters waiting?')
getter = self._getters.popleft()
self.__put_internal(item)
# getter cannot be cancelled, we just removed done getters
getter.set_result(self._get())
elif self._maxsize > 0 and self._maxsize <= self.qsize():
raise QueueFull
else:
self.__put_internal(item)
@coroutine
def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
This method is a coroutine.
"""
self._consume_done_putters()
if self._putters:
assert self.full(), 'queue not full, why are putters waiting?'
item, putter = self._putters.popleft()
self.__put_internal(item)
# When a getter runs and frees up a slot so this putter can
# run, we need to defer the put for a tick to ensure that
# getters and putters alternate perfectly. See
# ChannelTest.test_wait.
self._loop.call_soon(putter._set_result_unless_cancelled, None)
return self._get()
elif self.qsize():
return self._get()
else:
waiter = futures.Future(loop=self._loop)
self._getters.append(waiter)
return (yield from waiter)
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
self._consume_done_putters()
if self._putters:
assert self.full(), 'queue not full, why are putters waiting?'
item, putter = self._putters.popleft()
self.__put_internal(item)
# Wake putter on next tick.
# getter cannot be cancelled, we just removed done putters
putter.set_result(None)
return self._get()
elif self.qsize():
return self._get()
else:
raise QueueEmpty
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
@coroutine
def join(self):
"""Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
yield from self._finished.wait()
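# Illustrative usage sketch (an editor's addition, not part of the original
# module) clarifying the put()/get()/task_done()/join() protocol described in
# the Queue docstring above. The helper names below are hypothetical.
@coroutine
def _example_producer(queue, items):
    # put() blocks (yields) when the queue is at maxsize, until get() frees
    # a slot.
    for item in items:
        yield from queue.put(item)
@coroutine
def _example_consumer(queue, n):
    # Every get() is balanced by task_done(), so a producer awaiting join()
    # unblocks once all n items have been processed.
    for _ in range(n):
        item = yield from queue.get()
        # ... process item here ...
        queue.task_done()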
class PriorityQueue(Queue):
"""A subclass of Queue; retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
"""
def _init(self, maxsize):
self._queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self._queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self._queue)
class LifoQueue(Queue):
"""A subclass of Queue that retrieves most recently added entries first."""
def _init(self, maxsize):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
| bsd-3-clause |
gauribhoite/personfinder | env/google_appengine/lib/jinja2-2.6/jinja2/parser.py | 171 | 35218 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.utils import next
from jinja2.lexer import describe_token, describe_token_expr
#: statements that call into a dedicated parse_<keyword> method on the parser
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(map(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
        for the block end and parses until one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
self.stream.expect('assign')
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
token = next(self.stream)
if token.test('name:elif'):
new_node = nodes.If(lineno=self.stream.current.lineno)
node.else_ = [new_node]
node = new_node
continue
elif token.test('name:else'):
node.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
else:
node.else_ = []
break
return result
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
                      'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
break
if not hasattr(node, 'with_context'):
node.with_context = False
self.stream.skip_if('comma')
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif self.stream.current.test('name:not') and \
self.stream.look().test('name:in'):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
        if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
        only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
        Because tuples do not require delimiters and may end in a bogus comma,
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
priority_on_attribute = False
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
        self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not \
self.stream.current.test_any('name:else', 'name:or',
'name:and'):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
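# Illustrative note (an editor's addition, not part of the original module):
# application code normally drives this parser indirectly through an
# Environment, e.g.
#
#   from jinja2 import Environment
#   env = Environment()
#   template_ast = env.parse('{% for item in seq %}{{ item }}{% endfor %}')
#
# which tokenizes the source and ends up in Parser(...).parse(), returning the
# nodes.Template built above. Custom extensions receive a Parser instance and
# typically call parse_expression() or parse_statements() from their parse()
# hook.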
| apache-2.0 |
JioCloud/nova | nova/tests/functional/v3/test_volumes.py | 30 | 14313 | # Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import datetime
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova import objects
from nova.tests.functional.v3 import api_sample_base
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class SnapshotsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
extension_name = "os-volumes"
# TODO(park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
create_subs = {
'snapshot_name': 'snap-001',
'description': 'Daily backup',
'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
}
def _get_flags(self):
f = super(SnapshotsSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.volumes.Volumes')
return f
def setUp(self):
super(SnapshotsSampleJsonTests, self).setUp()
self.stubs.Set(cinder.API, "get_all_snapshots",
fakes.stub_snapshot_get_all)
self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
def _create_snapshot(self):
self.stubs.Set(cinder.API, "create_snapshot",
fakes.stub_snapshot_create)
response = self._do_post("os-snapshots",
"snapshot-create-req",
self.create_subs)
return response
def test_snapshots_create(self):
response = self._create_snapshot()
self.create_subs.update(self._get_regexes())
self._verify_response("snapshot-create-resp",
self.create_subs, response, 200)
def test_snapshots_delete(self):
self.stubs.Set(cinder.API, "delete_snapshot",
fakes.stub_snapshot_delete)
self._create_snapshot()
response = self._do_delete('os-snapshots/100')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_snapshots_detail(self):
response = self._do_get('os-snapshots/detail')
subs = self._get_regexes()
self._verify_response('snapshots-detail-resp', subs, response, 200)
def test_snapshots_list(self):
response = self._do_get('os-snapshots')
subs = self._get_regexes()
self._verify_response('snapshots-list-resp', subs, response, 200)
def test_snapshots_show(self):
response = self._do_get('os-snapshots/100')
subs = {
'snapshot_name': 'Default name',
'description': 'Default description'
}
subs.update(self._get_regexes())
self._verify_response('snapshots-show-resp', subs, response, 200)
class VolumesSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-volumes"
# TODO(park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(VolumesSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.volumes.Volumes')
return f
def _get_volume_id(self):
return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
def _stub_volume(self, id, displayname="Volume Name",
displaydesc="Volume Description", size=100):
volume = {
'id': id,
'size': size,
'availability_zone': 'zone1:host1',
'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
'mountpoint': '/',
'status': 'in-use',
'attach_status': 'attached',
'name': 'vol name',
'display_name': displayname,
'display_description': displaydesc,
'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'Backup'}
}
return volume
def _stub_volume_get(self, context, volume_id):
return self._stub_volume(volume_id)
def _stub_volume_delete(self, context, *args, **param):
pass
def _stub_volume_get_all(self, context, search_opts=None):
id = self._get_volume_id()
return [self._stub_volume(id)]
def _stub_volume_create(self, context, size, name, description, snapshot,
**param):
id = self._get_volume_id()
return self._stub_volume(id)
def setUp(self):
super(VolumesSampleJsonTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
self.stubs.Set(cinder.API, "get", self._stub_volume_get)
self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
def _post_volume(self):
subs_req = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
self.stubs.Set(cinder.API, "create", self._stub_volume_create)
response = self._do_post('os-volumes', 'os-volumes-post-req',
subs_req)
subs = self._get_regexes()
subs.update(subs_req)
self._verify_response('os-volumes-post-resp', subs, response, 200)
def test_volumes_show(self):
subs = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
vol_id = self._get_volume_id()
response = self._do_get('os-volumes/%s' % vol_id)
subs.update(self._get_regexes())
self._verify_response('os-volumes-get-resp', subs, response, 200)
def test_volumes_index(self):
subs = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
response = self._do_get('os-volumes')
subs.update(self._get_regexes())
self._verify_response('os-volumes-index-resp', subs, response, 200)
def test_volumes_detail(self):
# For now, index and detail are the same.
# See the volumes api
subs = {
'volume_name': "Volume Name",
'volume_desc': "Volume Description",
}
response = self._do_get('os-volumes/detail')
subs.update(self._get_regexes())
self._verify_response('os-volumes-detail-resp', subs, response, 200)
def test_volumes_create(self):
self._post_volume()
def test_volumes_delete(self):
self._post_volume()
vol_id = self._get_volume_id()
response = self._do_delete('os-volumes/%s' % vol_id)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
class VolumeAttachmentsSampleBase(test_servers.ServersSampleBase):
def _stub_db_bdms_get_all_by_instance(self, server_id):
def fake_bdms_get_all_by_instance(context, instance_uuid,
use_slave=False):
bdms = [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
'instance_uuid': server_id, 'source_type': 'volume',
'destination_type': 'volume', 'device_name': '/dev/sdd'}),
fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
'instance_uuid': server_id, 'source_type': 'volume',
'destination_type': 'volume', 'device_name': '/dev/sdc'})
]
return bdms
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdms_get_all_by_instance)
def _stub_compute_api_get(self):
def fake_compute_api_get(self, context, instance_id,
want_objects=False, expected_attrs=None):
if want_objects:
return fake_instance.fake_instance_obj(
context, **{'uuid': instance_id})
else:
return {'uuid': instance_id}
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
class VolumeAttachmentsSampleJsonTest(VolumeAttachmentsSampleBase):
extra_extensions_to_load = ["os-access-ips"]
extension_name = "os-volumes"
# TODO(park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(VolumeAttachmentsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.volumes.Volumes')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'volume_attachment_update.Volume_attachment_update')
return f
def test_attach_volume_to_server(self):
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
device_name = '/dev/vdd'
bdm = objects.BlockDeviceMapping()
bdm['device_name'] = device_name
self.stubs.Set(compute_manager.ComputeManager,
"reserve_block_device_name",
lambda *a, **k: bdm)
self.stubs.Set(compute_manager.ComputeManager,
'attach_volume',
lambda *a, **k: None)
self.stubs.Set(objects.BlockDeviceMapping, 'get_by_volume_id',
classmethod(lambda *a, **k: None))
volume = fakes.stub_volume_get(None, context.get_admin_context(),
'a26887c6-c47b-4654-abb5-dfadf7d3f803')
subs = {
'volume_id': volume['id'],
'device': device_name
}
server_id = self._post_server()
response = self._do_post('servers/%s/os-volume_attachments'
% server_id,
'attach-volume-to-server-req', subs)
subs.update(self._get_regexes())
self._verify_response('attach-volume-to-server-resp', subs,
response, 200)
def test_list_volume_attachments(self):
server_id = self._post_server()
self._stub_db_bdms_get_all_by_instance(server_id)
response = self._do_get('servers/%s/os-volume_attachments'
% server_id)
subs = self._get_regexes()
self._verify_response('list-volume-attachments-resp', subs,
response, 200)
def test_volume_attachment_detail(self):
server_id = self._post_server()
attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
self._stub_db_bdms_get_all_by_instance(server_id)
self._stub_compute_api_get()
response = self._do_get('servers/%s/os-volume_attachments/%s'
% (server_id, attach_id))
subs = self._get_regexes()
self._verify_response('volume-attachment-detail-resp', subs,
response, 200)
def test_volume_attachment_delete(self):
server_id = self._post_server()
attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
self._stub_db_bdms_get_all_by_instance(server_id)
self._stub_compute_api_get()
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
response = self._do_delete('servers/%s/os-volume_attachments/%s'
% (server_id, attach_id))
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_volume_attachment_update(self):
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
subs = {
'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f805'
}
server_id = self._post_server()
attach_id = 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
self._stub_db_bdms_get_all_by_instance(server_id)
self._stub_compute_api_get()
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
self.stubs.Set(compute_api.API, 'swap_volume', lambda *a, **k: None)
response = self._do_put('servers/%s/os-volume_attachments/%s'
% (server_id, attach_id),
'update-volume-req',
subs)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
| apache-2.0 |
MagnetoTesting/magneto | magneto/utils/adb.py | 3 | 11444 | import os
import subprocess
import re
import threading
from functools import wraps
from concurrent.futures import Future, wait
import time
from ..logger import Logger
class ADB(object):
"""
Wrapper for adb commands.
Import::
from magneto.utils.adb import ADB
:required: Android SDK installed and ANDROID_HOME pointing to SDK directory location.
"""
_lock = threading.Lock()
device_id = None
ADB_PATH = os.path.join(os.environ['ANDROID_HOME'], 'platform-tools', 'adb')
@classmethod
def exec_cmd(cls, exec_cmd, stdin=None, stdout=None, stderr=None):
"""
Executes adb commands::
ADB.exec_cmd('shell uninstall com.android.package')
In order to retrieve the process output, tap into the stdout::
import subprocess
proc = ADB.exec_cmd('shell pm list packages', stdout=subprocess.PIPE)
list = proc.stdout.readline().strip('\\r\\n')
:param str exec_cmd: Adb command to execute
:param stdin:
:param stdout:
:param stderr:
:return: Adb process
"""
with cls._lock:
cmd = 'exec {adb_path} {device_id} {command}'.format(
adb_path=cls.ADB_PATH,
device_id='-s {0}'.format(cls.device_id) if cls.device_id else '',
command=exec_cmd
)
return subprocess.Popen(cmd, shell=True, stdin=stdin, stdout=stdout, stderr=stderr)
@classmethod
def get_log(cls):
"""
Get the adb logcat log dump.
:return: Log dump
"""
dump = []
p = cls.exec_cmd("logcat -v time -d", stdout=subprocess.PIPE)
for l in p.stdout:
dump.append(l.strip())
return dump
@classmethod
def clear_log(cls):
"""
Clears adb log
:return: Adb process
"""
return cls.exec_cmd("logcat -c")
@classmethod
def start_activity(cls, package_name, activity_name, extras=None):
"""
Start app activity.
Open Gmail app::
ADB.start_activity('com.google.android.gm', 'com.google.android.gm.ConversationListActivity')
:param str package_name: Package name
:param str activity_name: Activity name
:return:
"""
Logger.debug('opening activity {}'.format(activity_name))
cls.exec_cmd("shell am start {0} {1}/{2}".format(extras or '', package_name, activity_name)).wait()
@classmethod
def kill_process(cls, package_name):
"""
Stops given app::
ADB.kill_process('com.android.chrome')
:param str package_name: Package name to stop
:return:
"""
cls.exec_cmd("shell am force-stop {0}".format(package_name)).wait()
@classmethod
def uninstall(cls, package_name):
Logger.debug('Uninstalling app {}'.format(package_name))
cls.exec_cmd('uninstall {0}'.format(package_name)).wait()
# remove app cache
cls.exec_cmd('shell su -c "rm -rf /data/data/{0}"'.format(package_name)).wait()
@classmethod
def install(cls, apk_path, extra_params='', retry=True):
Logger.debug('Installing app {}'.format(apk_path))
p = cls.exec_cmd('install {} {}'.format(extra_params, apk_path), stdout=subprocess.PIPE)
result = p.communicate()[0].strip().split('\r\n')[-1]
if result == 'Success':
Logger.debug('App installed')
return True
elif result != 'Success' and retry:
Logger.debug('App install failed: {}'.format(result))
Logger.debug('Retrying install...')
return cls.install(apk_path, extra_params=extra_params, retry=False)
else:
Logger.debug('App install failed: {}'.format(result))
return False
@classmethod
def getprop(cls, prop):
p = cls.exec_cmd('shell getprop {}'.format(prop), stdout=subprocess.PIPE)
return p.stdout.readline().strip('\r\n')
@classmethod
def set_datetime(cls, dt):
adb_date = dt.strftime('%Y%m%d.%H%M%S')
cls.exec_cmd("shell \"su -c 'date -s {adb_date}'\"".format(adb_date=adb_date)).wait()
class RegexMatcher(object):
def __init__(self, pattern):
self._pattern = pattern
def __call__(self, line):
return self._pattern.search(line)
def __str__(self):
return self._pattern.pattern
class ADBLogWatch(threading.Thread):
"""
Enables watching adb logcat output and asserting that certain log lines appeared::
from magneto.utils.adb import ADBLogWatch
def test_button_click_log(self):
with ADBLogWatch() as watcher:
watcher.watch('button clicked')
self.magneto(text='Click here').click()
watcher.assert_done()
"""
instance = None
def __init__(self):
ADB.clear_log().wait()
super(ADBLogWatch, self).__init__()
self._watchers = {}
self.asked_to_stop = False
if self.__class__.instance is not None:
raise RuntimeError('Only one instance of {} is allowed at any time.'.format(self.__class__.__name__))
self.__class__.instance = self
@classmethod
def wrap(cls, fn):
@wraps(fn)
def wrapper(instance, *args, **kwargs):
self = cls()
with self:
return fn(instance, self, *args, **kwargs)
return wrapper
def __enter__(self, *_):
self.start()
return self
def __exit__(self, *_):
self.exit()
def run(self):
p = ADB.exec_cmd('logcat', stdout=subprocess.PIPE)
while not self.asked_to_stop:
line = p.stdout.readline().strip()
if line == '':
break
for matcher, (future, min_times) in self._watchers.items():
match = matcher(line)
if match:
Logger.debug('Found logcat for "{}"'.format(matcher))
if min_times == 1:
future.set_result(line)
Logger.debug('Removing watch "{}"'.format(matcher))
del self._watchers[matcher]
else:
self._watchers[matcher] = future, min_times - 1
p.terminate()
Logger.debug('ADB logcat process terminated')
def exit(self):
self.__class__.instance = None
self.asked_to_stop = True
self.join()
def watch(self, pattern, **kwargs):
"""
Watch for given pattern in logs.
:param pattern: Regular expression pattern to perform on logcat lines
:param int min_times: Minimum times a certain pattern should appear in log
"""
if callable(pattern):
Logger.debug('watching pattern "{}"'.format(str(pattern)))
future = Future()
self._watchers[pattern] = future, kwargs.get('min_times', 1)
return future
else:
return self.watch_compiled(re.compile(pattern), **kwargs)
def watch_compiled(self, pattern, min_times=1):
regex_matcher = RegexMatcher(pattern)
Logger.debug('watching pattern "{}"'.format(str(regex_matcher)))
future = Future()
self._watchers[regex_matcher] = future, min_times
return future
def assert_done(self, timeout=15, stall=None, futures=None):
"""
Asserts if all/some watches exist in log.
:param futures: The futures we want to wait to end
:param timeout: Amount of time in seconds to wait for watches to appear in log. Defaults to 15 seconds.
:param stall: Amount of time in seconds to keep waiting before exiting the wait. More info in https://github.com/EverythingMe/magneto/issues/1
Example::
from magneto.utils.adb import ADBLogWatch
def test_incoming_call(self):
with ADBLogWatch() as watcher:
watcher.watch('call arrived')
call_device()
watcher.assert_done()
Example with stall::
from magneto.utils.adb import ADBLogWatch
def test_incoming_call(self):
with ADBLogWatch() as watcher:
watcher.watch('call arrived')
call_device()
watcher.assert_done(timeout=10, stall=120)
"""
if futures:
if not isinstance(futures, list):
futures = [futures]
else:
futures = [future for (future, _) in self._watchers.itervalues()]
Logger.debug('waiting up to {} seconds for {} watchers'.format(timeout, len(futures)))
start_time = time.time()
done, pending = wait(futures, timeout=timeout)
if not pending:
Logger.debug('watchers done')
return
if stall:
stall_time = stall-int(time.time() - start_time)
Logger.debug('Stalling for another %d seconds' % stall_time)
done, pending = wait(futures, timeout=stall_time)
if not pending:
Logger.debug('watchers done')
patterns_left = '\n'.join('pattern: {}'.format(str(r)) for r in self._watchers)
raise AssertionError(
'assert_done failure.\nWaited {} seconds but still have {} watchers:\n{}'
.format(timeout, len(self._watchers), patterns_left)
)
def assert_watch(self, *args, **kwargs):
"""
Asserts that the given watchers appeared in the log
:param args: <Future> watcher futures to wait on
:param kwargs: extra params passed on to assert_done, e.g. timeout
:return: result of self.assert_done
"""
return self.assert_done(futures=list(args), **kwargs)
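# A minimal end-to-end sketch of the Future-based flow above: watch() returns a
# Future which assert_watch()/assert_done() wait on. The log pattern and the
# click action are illustrative assumptions borrowed from the class docstring.
def example_watch_flow(test_case):
    with ADBLogWatch() as watcher:
        clicked = watcher.watch(r'button clicked')
        test_case.magneto(text='Click here').click()
        watcher.assert_watch(clicked, timeout=30)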
class ADBVideoCapture(threading.Thread):
def __init__(self, video_file_name, timelimit=180, bitrate=1000000, size='1280x720'):
super(ADBVideoCapture, self).__init__()
self.video_file_name = video_file_name
self.recording_process = None
self.files = []
self.asked_to_stop = False
self.timelimit = timelimit
self.bitrate = bitrate
self.size = size
self.daemon = True
self.start()
def run(self):
self.start_recording()
while not self.asked_to_stop:
if self.recording_process.poll() is not None:
self.start_recording()
self.recording_process.terminate()
def start_recording(self):
self.files.append('/sdcard/{0}-{1}.video.mp4'.format(self.video_file_name, len(self.files) + 1))
self.recording_process = ADB.exec_cmd(
'shell screenrecord --bit-rate {bitrate} --time-limit {timelimit} --size {size} {file}'.format(
bitrate=self.bitrate,
timelimit=self.timelimit,
size=self.size,
file=self.files[
len(self.files) - 1]))
def stop_recording(self):
self.asked_to_stop = True
def save_files(self, save_file_path):
for video_file in self.files:
ADB.exec_cmd('pull {0} {1}'.format(video_file, save_file_path)).wait()
def delete_files(self):
for video_file in self.files:
ADB.exec_cmd('shell rm {0}'.format(video_file)).wait()
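# A short usage sketch for ADBVideoCapture; the capture name, duration and pull
# destination are placeholder values.
def example_video_capture():
    capture = ADBVideoCapture('smoke-test', timelimit=60)
    try:
        pass  # exercise the scenario under test here
    finally:
        capture.stop_recording()
        capture.save_files('/tmp/magneto-videos')
        capture.delete_files()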
| apache-2.0 |
likelyzhao/mxnet | python/mxnet/_ctypes/symbol.py | 2 | 7644 | # coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-arguments, global-statement
"""Symbolic configuration API."""
from __future__ import absolute_import as _abs
import ctypes
import sys
import numpy as _numpy
from ..base import _LIB
from ..base import c_array, c_str, mx_uint, py_str
from ..base import SymbolHandle, OpHandle
from ..base import check_call
from ..symbol_doc import _build_doc
from ..name import NameManager
from ..attribute import AttrScope
_symbol_cls = None
class SymbolBase(object):
"""Symbol is symbolic graph."""
__slots__ = ["handle"]
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
self.handle = handle
def __del__(self):
check_call(_LIB.NNSymbolFree(self.handle))
def _compose(self, *args, **kwargs):
"""Compose symbol on inputs.
This call mutates the current symbol.
Parameters
----------
args:
provide positional arguments
kwargs:
provide keyword arguments
Returns
-------
the resulting symbol
"""
name = kwargs.pop('name', None)
if name:
name = c_str(name)
if len(args) != 0 and len(kwargs) != 0:
raise TypeError('compose only accept input Symbols \
either as positional or keyword arguments, not both')
for arg in args:
if not isinstance(arg, SymbolBase):
raise TypeError('Compose expect `Symbol` as arguments')
for val in kwargs.values():
if not isinstance(val, SymbolBase):
raise TypeError('Compose expect `Symbol` as arguments')
num_args = len(args) + len(kwargs)
if len(kwargs) != 0:
keys = c_array(ctypes.c_char_p, [c_str(key) for key in kwargs.keys()])
args = c_array(SymbolHandle, [s.handle for s in kwargs.values()])
else:
keys = None
args = c_array(SymbolHandle, [s.handle for s in args])
check_call(_LIB.NNSymbolCompose(
self.handle, name, num_args, keys, args))
def _set_attr(self, **kwargs):
"""Set the attribute of the symbol.
Parameters
----------
**kwargs
The attributes to set
"""
keys = c_array(ctypes.c_char_p,
[c_str(key) for key in kwargs.keys()])
vals = c_array(ctypes.c_char_p,
[c_str(str(val)) for val in kwargs.values()])
num_args = mx_uint(len(kwargs))
check_call(_LIB.MXSymbolSetAttrs(
self.handle, num_args, keys, vals))
def _set_handle(self, handle):
"""Set handle."""
self.handle = handle
def __reduce__(self):
return (_symbol_cls, (None,), self.__getstate__())
def _set_symbol_class(cls):
"""Set the symbolic class to be cls"""
global _symbol_cls
_symbol_cls = cls
def _make_atomic_symbol_function(handle, name):
"""Create an atomic symbol function by handle and funciton name."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
func_name = name
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(func_name,
py_str(desc.value),
[py_str(arg_names[i]) for i in range(narg)],
[py_str(arg_types[i]) for i in range(narg)],
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
def creator(*args, **kwargs):
"""Activation Operator of Neural Net.
The parameters listed below can be passed in as keyword arguments.
Parameters
----------
name : string, required.
Name of the resulting symbol.
Returns
-------
symbol: Symbol
the resulting symbol
"""
param_keys = []
param_vals = []
symbol_kwargs = {}
attr = kwargs.pop('attr', None)
kwargs.update(AttrScope.current.get(attr))
name = kwargs.pop('name', None)
if 'dtype' in kwargs:
kwargs['dtype'] = _numpy.dtype(kwargs['dtype']).name
if key_var_num_args and key_var_num_args not in kwargs:
param_keys.append(c_str(key_var_num_args))
param_vals.append(c_str(str(len(args))))
for k, v in kwargs.items():
if isinstance(v, SymbolBase):
symbol_kwargs[k] = v
else:
param_keys.append(c_str(k))
param_vals.append(c_str(str(v)))
# create atomic symbol
param_keys = c_array(ctypes.c_char_p, param_keys)
param_vals = c_array(ctypes.c_char_p, param_vals)
sym_handle = SymbolHandle()
check_call(_LIB.MXSymbolCreateAtomicSymbol(
handle,
mx_uint(len(param_keys)),
param_keys, param_vals,
ctypes.byref(sym_handle)))
if len(args) != 0 and len(symbol_kwargs) != 0:
raise TypeError(
'%s can only accept input '
'Symbols either as positional or keyword arguments, not both' % func_name)
s = _symbol_cls(sym_handle)
hint = func_name.lower()
name = NameManager.current.get(name, hint)
s._compose(*args, name=name, **symbol_kwargs)
return s
creator.__name__ = func_name
creator.__doc__ = doc_str
creator.__module__ = 'mxnet.symbol'
return creator
def _init_symbol_module(symbol_class, root_namespace):
"""List and add all the atomic symbol functions to current module."""
_set_symbol_class(symbol_class)
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_names.append(py_str(plist[i]))
module_obj = sys.modules["%s.symbol" % root_namespace]
module_internal = sys.modules["%s._symbol_internal" % root_namespace]
module_contrib = sys.modules["%s.contrib.symbol" % root_namespace]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
function = _make_atomic_symbol_function(hdl, name)
if function.__name__.startswith('_contrib_'):
function.__name__ = function.__name__[9:]
function.__module__ = 'mxnet.contrib.symbol'
setattr(module_contrib, function.__name__, function)
elif function.__name__.startswith('_'):
setattr(module_internal, function.__name__, function)
else:
setattr(module_obj, function.__name__, function)
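# Rough sketch of how the creators generated above are reached once
# _init_symbol_module() has populated mxnet.symbol; the operators shown are
# standard mxnet operators, used here purely for illustration.
def example_symbol_usage():
    import mxnet as mx
    data = mx.symbol.Variable('data')
    fc1 = mx.symbol.FullyConnected(data=data, num_hidden=128, name='fc1')
    act1 = mx.symbol.Activation(data=fc1, act_type='relu', name='relu1')
    return act1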
| apache-2.0 |
clovis/PhiloLogic4 | python/philologic/runtime/DB.py | 2 | 8445 | """DB class containing many niceties to retrieve data from SQL"""
import hashlib
import os
import sqlite3
from philologic.Config import Config, db_locals_defaults, db_locals_header
from philologic.runtime import Query
from . import HitList
from . import MetadataQuery
from . import QuerySyntax
from .HitWrapper import HitWrapper, PageWrapper
def hit_to_string(hit, width):
"""Convert Philo hit to a string"""
if isinstance(hit, sqlite3.Row):
hit = hit["philo_id"]
if isinstance(hit, str):
hit = [int(x) for x in hit.split(" ")]
if isinstance(hit, int):
hit = [hit]
if len(hit) > width:
hit = hit[:width]
pad = width - len(hit)
hit_string = " ".join(str(h) for h in hit)
hit_string += "".join(" 0" for n in range(pad))
return hit_string
class DB:
"""Main class to access SQL data"""
def __init__(self, dbpath, width=7, cached=True):
"""Disabling cache allows to use multiple DB objects in the same script
without running into collisions"""
self.path = dbpath
self.width = width
self.locals = Config(dbpath + "/db.locals.py", db_locals_defaults, db_locals_header)
self.cached = cached
def __getitem__(self, item):
if self.width != 9: # verify this isn't a page id
hit = self.get_id_lowlevel(item)
hit = [int(x) for x in hit["philo_id"].split(" ")]
return HitWrapper(hit, self)
hit = [int(x) for x in hit_to_string(item, 9).split(" ")]
return PageWrapper(hit, self)
def __getattr__(self, attr):
# We need to open DB only when accessed
# Useful when using a single instance of the DB class accross multiple processors
if attr == "dbh":
dbh = sqlite3.connect(self.path + "/toms.db", self.width)
dbh.text_factory = str
dbh.row_factory = sqlite3.Row
setattr(self, "dbh", dbh)
return self.dbh
def get_id_lowlevel(self, item):
"""Retrieve text object metadata"""
hit_s = hit_to_string(item, self.width)
c = self.dbh.cursor()
c.execute("SELECT * FROM toms WHERE philo_id=? LIMIT 1;", (hit_s,))
return c.fetchone()
def get_word(self, item):
"""Retrieve word from words table"""
word_s = hit_to_string(item, self.width)
c = self.dbh.cursor()
c.execute("SELECT * FROM words WHERE philo_id=? LIMIT 1;", (word_s,))
return c.fetchone()
def get_page(self, item):
"""Retrieve page data"""
page_id_s = " ".join(str(s) for s in item)
c = self.dbh.cursor()
c.execute("SELECT * FROM pages WHERE philo_id=? LIMIT 1;", (page_id_s,))
return c.fetchone()
def get_line(self, byte_offset, doc_id):
"""Retrieve line data"""
c = self.dbh.cursor()
try:
c.execute(
"SELECT * FROM lines WHERE doc_id=? and start_byte < ? and end_byte > ? LIMIT 1",
(doc_id, byte_offset, byte_offset),
)
return c.fetchone()
except sqlite3.OperationalError:
return ""
def get_all(self, philo_type="doc", sort_order=["rowid"], raw_results=False):
""" get all objects of type philo_type """
hash = hashlib.sha1()
hash.update(self.path.encode("utf8"))
hash.update(philo_type.encode("utf8"))
all_hash = hash.hexdigest()
all_file = self.path + "/hitlists/" + all_hash + ".hitlist"
if not os.path.isfile(all_file):
# write out the corpus file
if philo_type == "div":
param_dicts = [{"philo_type": ['"div1"|"div2"|"div3"']}]
else:
param_dicts = [{"philo_type": ['"%s"' % philo_type]}]
return MetadataQuery.metadata_query(self, all_file, param_dicts, sort_order, raw_results=raw_results)
else:
return HitList.HitList(all_file, 0, self, sort_order=sort_order, raw=raw_results)
def query(self, qs="", method="", method_arg="", limit="", sort_order=["rowid"], raw_results=False, **metadata):
"""query the PhiloLogic database"""
method = method or "proxy"
if isinstance(method_arg, str):
try:
method_arg = int(method_arg)
except (TypeError, ValueError):
if method == "cooc" or method == "sentence":
method_arg = 6
else:
method_arg = 0
if isinstance(limit, str):
try:
limit = int(limit)
except (TypeError, ValueError):
limit = 10000000
hash = hashlib.sha1()
hash.update(self.path.encode("utf8"))
has_metadata = False
corpus_file = None
for key, value in metadata.items():
if isinstance(value, str):
if value == "":
pass
else:
value = [value]
metadata[key] = value
value = [v for v in value if v]
if value:
has_metadata = True
key_value = "%s=%s" % (key, "|".join(value))
hash.update(key_value.encode("utf8"))
if has_metadata:
corpus_hash = hash.hexdigest()
corpus_file = self.path + "/hitlists/" + corpus_hash + ".hitlist"
if not os.path.isfile(corpus_file):
# before we query, we need to figure out what type each parameter belongs to,
# and sort them into a list of dictionaries, one for each type.
metadata_dicts = [{} for level in self.locals["metadata_hierarchy"]]
for k, v in list(metadata.items()):
for i, params in enumerate(self.locals["metadata_hierarchy"]):
if v and (k in params):
metadata_dicts[i][k] = v
if k in self.locals["metadata_types"]:
this_type = self.locals["metadata_types"][k]
if this_type == "div":
metadata_dicts[i]["philo_type"] = ['"div"|"div1"|"div2"|"div3"']
else:
metadata_dicts[i]["philo_type"] = ['"%s"' % self.locals["metadata_types"][k]]
metadata_dicts = [d for d in metadata_dicts if d]
if "philo_id" in metadata:
if metadata_dicts:
metadata_dicts[-1]["philo_id"] = metadata["philo_id"]
else:
metadata_dicts.append({"philo_id": metadata["philo_id"]})
corpus = MetadataQuery.metadata_query(self, corpus_file, metadata_dicts, sort_order)
else:
if sort_order == ["rowid"]:
sort_order = None
corpus = HitList.HitList(corpus_file, 0, self, sort_order=sort_order, raw=raw_results)
corpus.finish()
if len(corpus) == 0:
return corpus
else:
corpus = None
if qs:
hash.update(qs.encode("utf8"))
hash.update(method.encode("utf8"))
hash.update(str(method_arg).encode("utf8"))
hash.update(str(limit).encode("utf8"))
search_hash = hash.hexdigest()
search_file = self.path + "/hitlists/" + search_hash + ".hitlist"
if sort_order == ["rowid"]:
sort_order = None
if not os.path.isfile(search_file):
return Query.query(
self,
qs,
corpus_file,
self.width,
method,
method_arg,
limit,
filename=search_file,
sort_order=sort_order,
raw_results=raw_results,
)
parsed = QuerySyntax.parse_query(qs)
grouped = QuerySyntax.group_terms(parsed)
split = Query.split_terms(grouped)
words_per_hit = len(split)
return HitList.HitList(search_file, words_per_hit, self, sort_order=sort_order, raw=raw_results)
if corpus:
return corpus
return self.get_all(self.locals["default_object_level"], sort_order)
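# A hedged usage sketch for the DB wrapper above; the database path and the
# 'author' metadata field are assumptions that depend on the loaded database.
def example_query(db_path="/path/to/philologic/db"):
    db = DB(db_path)
    hits = db.query("love", method="proxy", author="Shakespeare")
    # hit lists may be filled asynchronously; len() reflects hits written so far
    return len(hits)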
| gpl-3.0 |
x2Ident/x2Ident | mitmproxy/mitmproxy/contrib/wbxml/ASWBXMLCodePage.py | 2 | 1832 | #!/usr/bin/env python
'''
@author: David Shaw, [email protected]
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: ASWBXMLCodePage.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class ASWBXMLCodePage:
def __init__(self):
self.namespace = ""
self.xmlns = ""
self.tokenLookup = {}
self.tagLookup = {}
def addToken(self, token, tag):
self.tokenLookup[token] = tag
self.tagLookup[tag] = token
def getToken(self, tag):
if tag in self.tagLookup:
return self.tagLookup[tag]
return 0xFF
def getTag(self, token):
if token in self.tokenLookup:
return self.tokenLookup[token]
return None
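# A small usage sketch for this code page container; the token/tag pair below is
# a placeholder, not a real ActiveSync code page assignment.
def _example_code_page():
    page = ASWBXMLCodePage()
    page.addToken(0x05, "Sync")
    assert page.getTag(0x05) == "Sync"
    assert page.getToken("Sync") == 0x05
    assert page.getToken("Missing") == 0xFF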
def __repr__(self):
return str(self.tokenLookup) | gpl-3.0 |
ryano144/intellij-community | plugins/hg4idea/testData/bin/mercurial/bookmarks.py | 91 | 9671 | # Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from mercurial.i18n import _
from mercurial.node import hex
from mercurial import encoding, error, util, obsolete
import errno, os
class bmstore(dict):
"""Storage for bookmarks.
This object should do all bookmark reads and writes, so that it's
fairly simple to replace the storage underlying bookmarks without
having to clone the logic surrounding bookmarks.
This particular bmstore implementation stores bookmarks as
{hash}\s{name}\n (the same format as localtags) in
.hg/bookmarks. The mapping is stored as {name: nodeid}.
This class does NOT handle the "current" bookmark state at this
time.
"""
def __init__(self, repo):
dict.__init__(self)
self._repo = repo
try:
for line in repo.vfs('bookmarks'):
line = line.strip()
if not line:
continue
if ' ' not in line:
repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
% line)
continue
sha, refspec = line.split(' ', 1)
refspec = encoding.tolocal(refspec)
try:
self[refspec] = repo.changelog.lookup(sha)
except LookupError:
pass
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
def write(self):
'''Write bookmarks
Write the given bookmark => hash dictionary to the .hg/bookmarks file
in a format equal to those of localtags.
We also store a backup of the previous state in undo.bookmarks that
can be copied back on rollback.
'''
repo = self._repo
if repo._bookmarkcurrent not in self:
setcurrent(repo, None)
wlock = repo.wlock()
try:
file = repo.vfs('bookmarks', 'w', atomictemp=True)
for name, node in self.iteritems():
file.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
file.close()
# touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
try:
os.utime(repo.sjoin('00changelog.i'), None)
except OSError:
pass
finally:
wlock.release()
def readcurrent(repo):
'''Get the current bookmark
If we use gittish branches we have a current bookmark that
we are on. This function returns the name of the bookmark. It
is stored in .hg/bookmarks.current
'''
mark = None
try:
file = repo.opener('bookmarks.current')
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
return None
try:
# No readline() in osutil.posixfile, reading everything is cheap
mark = encoding.tolocal((file.readlines() or [''])[0])
if mark == '' or mark not in repo._bookmarks:
mark = None
finally:
file.close()
return mark
def setcurrent(repo, mark):
'''Set the name of the bookmark that we are currently on
Set the name of the bookmark that we are on (hg update <bookmark>).
The name is recorded in .hg/bookmarks.current
'''
current = repo._bookmarkcurrent
if current == mark:
return
if mark not in repo._bookmarks:
mark = ''
wlock = repo.wlock()
try:
file = repo.opener('bookmarks.current', 'w', atomictemp=True)
file.write(encoding.fromlocal(mark))
file.close()
finally:
wlock.release()
repo._bookmarkcurrent = mark
def unsetcurrent(repo):
wlock = repo.wlock()
try:
try:
util.unlink(repo.join('bookmarks.current'))
repo._bookmarkcurrent = None
except OSError, inst:
if inst.errno != errno.ENOENT:
raise
finally:
wlock.release()
def iscurrent(repo, mark=None, parents=None):
'''Tell whether the current bookmark is also active
I.e., the bookmark listed in .hg/bookmarks.current also points to a
parent of the working directory.
'''
if not mark:
mark = repo._bookmarkcurrent
if not parents:
parents = [p.node() for p in repo[None].parents()]
marks = repo._bookmarks
return (mark in marks and marks[mark] in parents)
def updatecurrentbookmark(repo, oldnode, curbranch):
try:
return update(repo, oldnode, repo.branchtip(curbranch))
except error.RepoLookupError:
if curbranch == "default": # no default branch!
return update(repo, oldnode, repo.lookup("tip"))
else:
raise util.Abort(_("branch %s not found") % curbranch)
def deletedivergent(repo, deletefrom, bm):
'''Delete divergent versions of bm on nodes in deletefrom.
Return True if at least one bookmark was deleted, False otherwise.'''
deleted = False
marks = repo._bookmarks
divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
for mark in divergent:
if mark and marks[mark] in deletefrom:
if mark != bm:
del marks[mark]
deleted = True
return deleted
def update(repo, parents, node):
deletefrom = parents
marks = repo._bookmarks
update = False
cur = repo._bookmarkcurrent
if not cur:
return False
if marks[cur] in parents:
old = repo[marks[cur]]
new = repo[node]
divs = [repo[b] for b in marks
if b.split('@', 1)[0] == cur.split('@', 1)[0]]
anc = repo.changelog.ancestors([new.rev()])
deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
if old.descendant(new):
marks[cur] = new.node()
update = True
if deletedivergent(repo, deletefrom, cur):
update = True
if update:
marks.write()
return update
def listbookmarks(repo):
# We may try to list bookmarks on a repo type that does not
# support it (e.g., statichttprepository).
marks = getattr(repo, '_bookmarks', {})
d = {}
hasnode = repo.changelog.hasnode
for k, v in marks.iteritems():
# don't expose local divergent bookmarks
if hasnode(v) and ('@' not in k or k.endswith('@')):
d[k] = hex(v)
return d
def pushbookmark(repo, key, old, new):
w = repo.wlock()
try:
marks = repo._bookmarks
if hex(marks.get(key, '')) != old:
return False
if new == '':
del marks[key]
else:
if new not in repo:
return False
marks[key] = repo[new].node()
marks.write()
return True
finally:
w.release()
def updatefromremote(ui, repo, remotemarks, path):
ui.debug("checking for updated bookmarks\n")
changed = False
localmarks = repo._bookmarks
for k in sorted(remotemarks):
if k in localmarks:
nr, nl = remotemarks[k], localmarks[k]
if nr in repo:
cr = repo[nr]
cl = repo[nl]
if cl.rev() >= cr.rev():
continue
if validdest(repo, cl, cr):
localmarks[k] = cr.node()
changed = True
ui.status(_("updating bookmark %s\n") % k)
else:
if k == '@':
kd = ''
else:
kd = k
# find a unique @ suffix
for x in range(1, 100):
n = '%s@%d' % (kd, x)
if n not in localmarks:
break
# try to use an @pathalias suffix
# if an @pathalias already exists, we overwrite (update) it
for p, u in ui.configitems("paths"):
if path == u:
n = '%s@%s' % (kd, p)
localmarks[n] = cr.node()
changed = True
ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
elif remotemarks[k] in repo:
# add remote bookmarks for changes we already have
localmarks[k] = repo[remotemarks[k]].node()
changed = True
ui.status(_("adding remote bookmark %s\n") % k)
if changed:
localmarks.write()
def diff(ui, dst, src):
ui.status(_("searching for changed bookmarks\n"))
smarks = src.listkeys('bookmarks')
dmarks = dst.listkeys('bookmarks')
diff = sorted(set(smarks) - set(dmarks))
for k in diff:
mark = ui.debugflag and smarks[k] or smarks[k][:12]
ui.write(" %-25s %s\n" % (k, mark))
if len(diff) <= 0:
ui.status(_("no changed bookmarks found\n"))
return 1
return 0
def validdest(repo, old, new):
"""Is the new bookmark destination a valid update from the old one"""
repo = repo.unfiltered()
if old == new:
# Old == new -> nothing to update.
return False
elif not old:
# old is nullrev, anything is valid.
# (new != nullrev has been excluded by the previous check)
return True
elif repo.obsstore:
return new.node() in obsolete.foreground(repo, [old.node()])
else:
# still an independent clause as it is lazier (and therefore faster)
return old.descendant(new)
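# A small sketch of the pushkey-style helpers above; 'repo' stands for a
# localrepository object and the bookmark name is arbitrary.
def example_move_bookmark(repo):
    marks = listbookmarks(repo)                      # {name: hex nodeid}
    old = marks.get('feature-x', '')
    return pushbookmark(repo, 'feature-x', old, 'tip')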
| apache-2.0 |
AICP/external_chromium_org | tools/check_ecs_deps/check_ecs_deps.py | 43 | 4858 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Verifies that builds of the embedded content_shell do not include
unnecessary dependencies.'''
import os
import re
import string
import subprocess
import sys
import optparse
kUndesiredLibraryList = [
'libX11',
'libXau',
'libXcomposite',
'libXcursor',
'libXdamage',
'libXdmcp',
'libXext',
'libXfixes',
'libXi',
'libXrandr',
'libXrender',
'libXtst',
'libasound',
'libcairo',
'libdbus',
'libffi',
'libgconf',
'libgio',
'libglib',
'libgmodule',
'libgobject',
'libpango',
'libpcre',
'libpixman',
'libpng',
'libresolv',
'libselinux',
'libudev',
'libxcb',
]
kAllowedLibraryList = [
# Toolchain libraries (gcc/glibc)
'ld-linux',
'libc',
'libdl',
'libgcc_s',
'libm',
'libpthread',
'librt',
'libstdc++',
'linux-vdso',
# Needed for default ozone platforms
'libdrm',
# NSS & NSPR
'libnss3',
'libnssutil3',
'libnspr4',
'libplc4',
'libplds4',
'libsmime3',
# Miscellaneous
'libcap',
'libexpat',
'libfontconfig',
'libz',
]
binary_target = 'content_shell'
def stdmsg(_final, errors):
if errors:
for message in errors:
print message
def bbmsg(final, errors):
if errors:
for message in errors:
print '@@@STEP_TEXT@%s@@@' % message
if final:
print '\n@@@STEP_%s@@@' % final
def _main():
output = {
'message': lambda x: stdmsg(None, x),
'fail': lambda x: stdmsg('FAILED', x),
'warn': lambda x: stdmsg('WARNING', x),
'abend': lambda x: stdmsg('FAILED', x),
'ok': lambda x: stdmsg('SUCCESS', x),
'verbose': lambda x: None,
}
parser = optparse.OptionParser(
"usage: %prog -b <dir> --target <Debug|Release>")
parser.add_option("", "--annotate", dest='annotate', action='store_true',
default=False, help="include buildbot annotations in output")
parser.add_option("", "--noannotate", dest='annotate', action='store_false')
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option('-v', '--verbose', default=False, action='store_true')
options, args = parser.parse_args()
if args:
parser.print_usage()
return -1
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.build_dir != None:
build_dir = os.path.abspath(options.build_dir)
else:
build_dir = os.getcwd()
target = os.path.join(build_dir, binary_target)
if options.annotate:
output.update({
'message': lambda x: bbmsg(None, x),
'fail': lambda x: bbmsg('FAILURE', x),
'warn': lambda x: bbmsg('WARNINGS', x),
'abend': lambda x: bbmsg('EXCEPTIONS', x),
'ok': lambda x: bbmsg(None, x),
})
if options.verbose:
output['verbose'] = lambda x: stdmsg(None, x)
forbidden_regexp = re.compile(string.join(map(re.escape,
kUndesiredLibraryList), '|'))
mapping_regexp = re.compile(r"\s*([^/]*) => (.*)")
blessed_regexp = re.compile(r"(%s)[-0-9.]*\.so" % string.join(map(re.escape,
kAllowedLibraryList), '|'))
built_regexp = re.compile(re.escape(build_dir + os.sep))
success = 0
warning = 0
p = subprocess.Popen(['ldd', target], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if err != '':
output['abend']([
'Failed to execute ldd to analyze dependencies for ' + target + ':',
' ' + err,
])
return 1
if out == '':
output['abend']([
'No output to scan for forbidden dependencies.'
])
return 1
success = 1
deps = string.split(out, '\n')
for d in deps:
libmatch = mapping_regexp.match(d)
if libmatch:
lib = libmatch.group(1)
source = libmatch.group(2)
if forbidden_regexp.search(lib):
success = 0
output['message'](['Forbidden library: ' + lib])
elif built_regexp.match(source):
output['verbose'](['Built library: ' + lib])
elif blessed_regexp.match(lib):
output['verbose'](['Blessed library: ' + lib])
else:
warning = 1
output['message'](['Unexpected library: ' + lib])
if success == 1:
if warning == 1:
output['warn'](None)
else:
output['ok'](None)
return 0
else:
output['fail'](None)
return 1
if __name__ == "__main__":
# handle arguments...
# do something reasonable if not run with one...
sys.exit(_main())
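# Typical invocation (paths are examples; --annotate adds buildbot step markup):
#
#   python tools/check_ecs_deps/check_ecs_deps.py -b out --target Release
#
# Exit status is 0 when only allowed/toolchain libraries are linked in, and 1
# when a forbidden dependency is found or the ldd scan fails.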
| bsd-3-clause |
Gchorba/Ask | lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py | 172 | 6278 | from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
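# A short sketch of the eviction/dispose_func behaviour described above; using a
# plain list as the callback sink is only for illustration.
def _example_recently_used():
    evicted = []
    cache = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3   # 'a' is now least recently used and gets evicted
    return evicted   # -> [1]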
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
If you want to access the raw headers with their original casing
for debugging purposes you can access the private ``._data`` attribute
which is a normal python ``dict`` that maps the case-insensitive key to a
list of tuples stored as (case-sensitive-original-name, value). Using the
structure from above as our example:
>>> headers._data
{'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
self._data = {}
if headers is None:
headers = {}
self.update(headers, **kwargs)
def add(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
self._data.setdefault(key.lower(), []).append((key, value))
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
return self[key].split(', ') if key in self else []
def copy(self):
h = HTTPHeaderDict()
for key in self._data:
for rawkey, value in self._data[key]:
h.add(rawkey, value)
return h
def __eq__(self, other):
if not isinstance(other, Mapping):
return False
other = HTTPHeaderDict(other)
return dict((k1, self[k1]) for k1 in self._data) == \
dict((k2, other[k2]) for k2 in other._data)
def __getitem__(self, key):
values = self._data[key.lower()]
return ', '.join(value[1] for value in values)
def __setitem__(self, key, value):
self._data[key.lower()] = [(key, value)]
def __delitem__(self, key):
del self._data[key.lower()]
def __len__(self):
return len(self._data)
def __iter__(self):
for headers in itervalues(self._data):
yield headers[0][0]
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
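# A small sketch of multi-value access, complementing the docstring examples;
# the header values are placeholders.
def _example_header_dict():
    headers = HTTPHeaderDict()
    headers.add('Set-Cookie', 'foo=bar')
    headers.add('set-cookie', 'baz=quxx')
    return headers.getlist('set-cookie')   # -> ['foo=bar', 'baz=quxx']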
| mit |
ixc/django-fluent-contents | fluent_contents/plugins/twitterfeed/south_migrations/0001_initial.py | 3 | 6928 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("fluent_contents", "0001_initial"),
)
def forwards(self, orm):
# Adding model 'TwitterRecentEntriesItem'
db.create_table('contentitem_twitterfeed_twitterrecententriesitem', (
('contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('twitter_user', self.gf('django.db.models.fields.CharField')(max_length=75)),
('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('include_retweets', self.gf('django.db.models.fields.BooleanField')(default=False)),
('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('twitterfeed', ['TwitterRecentEntriesItem'])
# Adding model 'TwitterSearchItem'
db.create_table('contentitem_twitterfeed_twittersearchitem', (
('contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('query', self.gf('django.db.models.fields.CharField')(default='', max_length=200)),
('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('include_retweets', self.gf('django.db.models.fields.BooleanField')(default=False)),
('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('twitterfeed', ['TwitterSearchItem'])
def backwards(self, orm):
# Deleting model 'TwitterRecentEntriesItem'
db.delete_table('contentitem_twitterfeed_twitterrecententriesitem')
# Deleting model 'TwitterSearchItem'
db.delete_table('contentitem_twitterfeed_twittersearchitem')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'to': "orm['fluent_contents.Placeholder']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'twitterfeed.twitterrecententriesitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterRecentEntriesItem', 'db_table': "'contentitem_twitterfeed_twitterrecententriesitem'", '_ormbases': ['fluent_contents.ContentItem']},
'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'include_retweets': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'})
},
'twitterfeed.twittersearchitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterSearchItem', 'db_table': "'contentitem_twitterfeed_twittersearchitem'", '_ormbases': ['fluent_contents.ContentItem']},
'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'include_retweets': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['twitterfeed']
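# With South installed, a migration like this is applied through the standard
# management commands; the app label matches this file, the rest is the usual
# South workflow.
#
#   ./manage.py migrate twitterfeed 0001_initial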
| apache-2.0 |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/module_utils/urls.py | 2 | 44764 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]>, 2015
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License. PSF License text
# follows:
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
# retained in Python alone or in any derivative version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
'''
The **urls** utils module offers a replacement for the urllib2 python library.
urllib2 is the python stdlib way to retrieve files from the Internet but it
lacks some security features (around verifying SSL certificates) that users
should care about in most situations. Using the functions in this module corrects
deficiencies in the urllib2 module wherever possible.
There are also third-party libraries (for instance, requests) which can be used
to replace urllib2 with a more secure library. However, all third party libraries
require that the library be installed on the managed machine. That is an extra step
for users making use of a module. If possible, avoid third party libraries by using
this code instead.
'''
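# --- Editor's note (kept as comments so the vendored module is unchanged) ---
# A minimal sketch of how module code typically consumes this file. The URL
# below and the exact argument values are illustrative assumptions, not part
# of this library:
#
#   from ansible.module_utils.basic import AnsibleModule
#   from ansible.module_utils.urls import fetch_url, url_argument_spec
#
#   argument_spec = url_argument_spec()                 # url, validate_certs, ...
#   module = AnsibleModule(argument_spec=argument_spec)
#   response, info = fetch_url(module, 'https://example.com/api', method='GET')
#   if info['status'] != 200:
#       module.fail_json(msg='Request failed: %s' % info['msg'])
# -----------------------------------------------------------------------------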
import base64
import netrc
import os
import platform
import re
import socket
import sys
import tempfile
import traceback
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
import ansible.module_utils.six.moves.urllib.request as urllib_request
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.basic import get_distribution
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
from urllib.request import AbstractHTTPHandler
except ImportError:
# python2
import urllib2 as urllib_request
from urllib2 import AbstractHTTPHandler
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
HAS_URLPARSE = True
except:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL = True
except:
HAS_SSL = False
try:
# SNI Handling needs python2.7.9's SSLContext
from ssl import create_default_context, SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
# SNI Handling for python < 2.7.9 with urllib3 support
try:
# urllib3>=1.15
HAS_URLLIB3_SSL_WRAP_SOCKET = False
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
HAS_URLLIB3_PYOPENSSLCONTEXT = True
except ImportError:
# urllib3<1.15,>=1.6
HAS_URLLIB3_PYOPENSSLCONTEXT = False
try:
try:
from urllib3.contrib.pyopenssl import ssl_wrap_socket
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
HAS_URLLIB3_SSL_WRAP_SOCKET = True
except ImportError:
pass
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
# If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
try:
import ctypes
import ctypes.util
except ImportError:
# python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
pass
else:
libssl_name = ctypes.util.find_library('ssl')
libssl = ctypes.CDLL(libssl_name)
for method in ('TLSv1_1_method', 'TLSv1_2_method'):
try:
libssl[method]
# Found something - we'll let openssl autonegotiate and hope
# the server has disabled sslv2 and 3. best we can do.
PROTOCOL = ssl.PROTOCOL_SSLv23
break
except AttributeError:
pass
del libssl
LOADED_VERIFY_LOCATIONS = set()
HAS_MATCH_HOSTNAME = True
try:
from ssl import match_hostname, CertificateError
except ImportError:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
HAS_MATCH_HOSTNAME = False
if not HAS_MATCH_HOSTNAME:
# The following block of code is under the terms and conditions of the
# Python Software Foundation License
"""The match_hostname() function from Python 3.4, essential when using SSL."""
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or subjectAltName fields were found")
# End of Python Software Foundation Licensed code
HAS_MATCH_HOSTNAME = True
# This is a dummy cacert provided for Mac OS since you need at least 1
# ca cert, regardless of validity, for Python on Mac OS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
#
# Exceptions
#
class ConnectionError(Exception):
"""Failed to connect to the server"""
pass
class ProxyError(ConnectionError):
"""Failure to connect because of a proxy"""
pass
class SSLValidationError(ConnectionError):
"""Failure to connect due to SSL validation failing"""
pass
class NoSSLError(SSLValidationError):
"""Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
pass
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = CustomHTTPSHandler = None
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
class CustomHTTPSConnection(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self.context = None
if HAS_SSLCONTEXT:
self.context = create_default_context()
elif HAS_URLLIB3_PYOPENSSLCONTEXT:
self.context = PyOpenSSLContext(PROTOCOL)
if self.context and self.cert_file:
self.context.load_cert_chain(self.cert_file, self.key_file)
def connect(self):
"Connect to a host on a given (SSL) port."
if hasattr(self, 'source_address'):
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
else:
sock = socket.create_connection((self.host, self.port), self.timeout)
server_hostname = self.host
# Note: self._tunnel_host is not available on py < 2.6 but this code
# isn't used on py < 2.6 (lack of create_connection)
if self._tunnel_host:
self.sock = sock
self._tunnel()
server_hostname = self._tunnel_host
if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL,
server_hostname=server_hostname)
else:
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
class CustomHTTPSHandler(urllib_request.HTTPSHandler):
def https_open(self, req):
return self.do_open(CustomHTTPSConnection, req)
https_request = AbstractHTTPHandler.do_request_
class HTTPSClientAuthHandler(urllib_request.HTTPSHandler):
'''Handles client authentication via cert/key
This is a fairly lightweight extension on HTTPSHandler, and can be used
in place of HTTPSHandler
'''
def __init__(self, client_cert=None, client_key=None, **kwargs):
urllib_request.HTTPSHandler.__init__(self, **kwargs)
self.client_cert = client_cert
self.client_key = client_key
def https_open(self, req):
return self.do_open(self._build_https_connection, req)
def _build_https_connection(self, host, **kwargs):
kwargs.update({
'cert_file': self.client_cert,
'key_file': self.client_key,
})
try:
kwargs['context'] = self._context
except AttributeError:
pass
return httplib.HTTPSConnection(host, **kwargs)
def generic_urlparse(parts):
'''
Returns a dictionary of url parts as parsed by urlparse,
but accounts for the fact that older versions of that
library do not support named attributes (ie. .netloc)
'''
generic_parts = dict()
if hasattr(parts, 'netloc'):
# urlparse is newer, just read the fields straight
# from the parts object
generic_parts['scheme'] = parts.scheme
generic_parts['netloc'] = parts.netloc
generic_parts['path'] = parts.path
generic_parts['params'] = parts.params
generic_parts['query'] = parts.query
generic_parts['fragment'] = parts.fragment
generic_parts['username'] = parts.username
generic_parts['password'] = parts.password
generic_parts['hostname'] = parts.hostname
generic_parts['port'] = parts.port
else:
# we have to use indexes, and then parse out
# the other parts not supported by indexing
generic_parts['scheme'] = parts[0]
generic_parts['netloc'] = parts[1]
generic_parts['path'] = parts[2]
generic_parts['params'] = parts[3]
generic_parts['query'] = parts[4]
generic_parts['fragment'] = parts[5]
# get the username, password, etc.
try:
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
match = netloc_re.match(parts[1])
auth = match.group(1)
hostname = match.group(2)
port = match.group(3)
if port:
# the capture group for the port will include the ':',
# so remove it and convert the port to an integer
port = int(port[1:])
if auth:
# the capture group above includes the @, so remove it
# and then split it up based on the first ':' found
auth = auth[:-1]
username, password = auth.split(':', 1)
else:
username = password = None
generic_parts['username'] = username
generic_parts['password'] = password
generic_parts['hostname'] = hostname
generic_parts['port'] = port
except:
generic_parts['username'] = None
generic_parts['password'] = None
generic_parts['hostname'] = parts[1]
generic_parts['port'] = None
return generic_parts
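# Illustrative sketch (the URL and values are assumptions, not a test fixture):
# for a URL such as 'https://user:[email protected]:8443/x?y=1',
#   generic_urlparse(urlparse(url))
# is expected to contain hostname='proxy.example.com', port=8443,
# username='user' and password='pw' among its keys.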
class RequestWithMethod(urllib_request.Request):
'''
Workaround for using DELETE/PUT/etc with urllib2
Originally contained in library/net_infrastructure/dnsmadeeasy
'''
def __init__(self, url, method, data=None, headers=None):
if headers is None:
headers = {}
self._method = method.upper()
urllib_request.Request.__init__(self, url, data, headers)
def get_method(self):
if self._method:
return self._method
else:
return urllib_request.Request.get_method(self)
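# Hedged usage sketch (the URL is an assumption): a plain urllib2 Request only
# infers GET/POST from the presence of data, so a DELETE is built as
#   req = RequestWithMethod('https://api.example.com/item/1', 'DELETE')
#   req.get_method()   # -> 'DELETE'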
def RedirectHandlerFactory(follow_redirects=None, validate_certs=True):
"""This is a class factory that closes over the value of
``follow_redirects`` so that the RedirectHandler class has access to
that value without having to use globals, and potentially cause problems
where ``open_url`` or ``fetch_url`` are used multiple times in a module.
"""
class RedirectHandler(urllib_request.HTTPRedirectHandler):
"""This is an implementation of a RedirectHandler to match the
functionality provided by httplib2. It will utilize the value of
``follow_redirects`` that is passed into ``RedirectHandlerFactory``
to determine how redirects should be handled in urllib2.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
handler = maybe_add_ssl_handler(newurl, validate_certs)
if handler:
urllib_request._opener.add_handler(handler)
if follow_redirects == 'urllib2':
return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
elif follow_redirects in ['no', 'none', False]:
raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)
do_redirect = False
if follow_redirects in ['all', 'yes', True]:
do_redirect = (code >= 300 and code < 400)
elif follow_redirects == 'safe':
m = req.get_method()
do_redirect = (code >= 300 and code < 400 and m in ('GET', 'HEAD'))
if do_redirect:
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
try:
# Python 2-3.3
origin_req_host = req.get_origin_req_host()
except AttributeError:
# Python 3.4+
origin_req_host = req.origin_req_host
return urllib_request.Request(newurl,
headers=newheaders,
origin_req_host=origin_req_host,
unverifiable=True)
else:
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
return RedirectHandler
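# Sketch of wiring the factory into an opener by hand (open_url() below does
# the equivalent automatically; the argument values are illustrative):
#   handler_cls = RedirectHandlerFactory(follow_redirects='safe', validate_certs=True)
#   opener = urllib_request.build_opener(handler_cls)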
def build_ssl_validation_error(hostname, port, paths, exc=None):
    '''Intelligently build out the SSLValidationError based on what support
you have installed
'''
msg = [
('Failed to validate the SSL certificate for %s:%s.'
' Make sure your managed systems have a valid CA'
' certificate installed.')
]
if not HAS_SSLCONTEXT:
msg.append('If the website serving the url uses SNI you need'
' python >= 2.7.9 on your managed machine')
msg.append(' (the python executable used (%s) is version: %s)' %
(sys.executable, ''.join(sys.version.splitlines())))
if not HAS_URLLIB3_PYOPENSSLCONTEXT or not HAS_URLLIB3_SSL_WRAP_SOCKET:
msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
' `ndg-httpsclient`, and `pyasn1` python modules')
msg.append('to perform SNI verification in python >= 2.6.')
msg.append('You can use validate_certs=False if you do'
               " not need to confirm the server's identity, but this is"
' unsafe and not recommended.'
' Paths checked for this platform: %s.')
if exc:
msg.append('The exception msg was: %s.' % to_native(exc))
raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
class SSLValidationHandler(urllib_request.BaseHandler):
'''
A custom handler class for SSL validation.
Based on:
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
http://techknack.net/python-urllib2-handlers/
'''
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
paths_checked = []
system = to_text(platform.system(), errors='surrogate_or_strict')
# build a list of paths to check for .crt/.pem files
# based on the platform type
paths_checked.append('/etc/ssl/certs')
if system == u'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif system == u'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif system == u'OpenBSD':
paths_checked.append('/etc/ssl')
elif system == u'NetBSD':
ca_certs.append('/etc/openssl/certs')
elif system == u'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_fd, tmp_path = tempfile.mkstemp()
to_add_fd, to_add_path = tempfile.mkstemp()
to_add = False
# Write the dummy ca cert if we are running on Mac OS X
if system == u'Darwin':
os.write(tmp_fd, b_DUMMY_CA_CERT)
# Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
try:
cert_file = open(full_path, 'rb')
cert = cert_file.read()
cert_file.close()
os.write(tmp_fd, cert)
os.write(tmp_fd, b'\n')
if full_path not in LOADED_VERIFY_LOCATIONS:
to_add = True
os.write(to_add_fd, cert)
os.write(to_add_fd, b'\n')
LOADED_VERIFY_LOCATIONS.add(full_path)
except (OSError, IOError):
pass
if not to_add:
to_add_path = None
return (tmp_path, to_add_path, paths_checked)
def validate_proxy_response(self, response, valid_codes=[200]):
'''
make sure we get back a valid code from the proxy
'''
try:
(http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
if int(resp_code) not in valid_codes:
raise Exception
except:
raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
Detect if the 'no_proxy' environment variable is set and honor those locations.
'''
env_no_proxy = os.environ.get('no_proxy')
if env_no_proxy:
env_no_proxy = env_no_proxy.split(',')
netloc = urlparse(url).netloc
for host in env_no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# Our requested URL matches something in no_proxy, so don't
# use the proxy for this
return False
return True
def _make_context(self, to_add_ca_cert_path):
if HAS_URLLIB3_PYOPENSSLCONTEXT:
context = PyOpenSSLContext(PROTOCOL)
else:
context = create_default_context()
if to_add_ca_cert_path:
context.load_verify_locations(to_add_ca_cert_path)
return context
def http_request(self, req):
tmp_ca_cert_path, to_add_ca_cert_path, paths_checked = self.get_ca_certs()
https_proxy = os.environ.get('https_proxy')
context = None
if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
context = self._make_context(to_add_ca_cert_path)
# Detect if 'no_proxy' environment variable is set and if our URL is included
use_proxy = self.detect_no_proxy(req.get_full_url())
if not use_proxy:
# ignore proxy settings for this host request
return req
try:
if https_proxy:
proxy_parts = generic_urlparse(urlparse(https_proxy))
port = proxy_parts.get('port') or 443
s = socket.create_connection((proxy_parts.get('hostname'), port))
if proxy_parts.get('scheme') == 'http':
s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict'))
if proxy_parts.get('username'):
credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
s.sendall(b'\r\n')
connect_result = b""
while connect_result.find(b"\r\n\r\n") <= 0:
connect_result += s.recv(4096)
# 128 kilobytes of headers should be enough for everyone.
if len(connect_result) > 131072:
raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
self.validate_proxy_response(connect_result)
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
else:
raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s = socket.create_connection((self.hostname, self.port))
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
# ssl_s.unwrap()
s.close()
except (ssl.SSLError, CertificateError) as e:
build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
except socket.error as e:
raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
os.remove(tmp_ca_cert_path)
except:
pass
try:
# cleanup the temp file created, don't worry
# if it fails for some reason
if to_add_ca_cert_path:
os.remove(to_add_ca_cert_path)
except:
pass
return req
https_request = http_request
def maybe_add_ssl_handler(url, validate_certs):
# FIXME: change the following to use the generic_urlparse function
# to remove the indexed references for 'parsed'
parsed = urlparse(url)
if parsed[0] == 'https' and validate_certs:
if not HAS_SSL:
raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
' however this is unsafe and not recommended')
# do the cert validation
netloc = parsed[1]
if '@' in netloc:
netloc = netloc.split('@', 1)[1]
if ':' in netloc:
hostname, port = netloc.split(':', 1)
port = int(port)
else:
hostname = netloc
port = 443
# create the SSL validation handler and
# add it to the list of handlers
return SSLValidationHandler(hostname, port)
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None):
'''
Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
Does not require the module environment
'''
handlers = []
ssl_handler = maybe_add_ssl_handler(url, validate_certs)
if ssl_handler:
handlers.append(ssl_handler)
# FIXME: change the following to use the generic_urlparse function
# to remove the indexed references for 'parsed'
parsed = urlparse(url)
if parsed[0] != 'ftp':
username = url_username
if headers is None:
headers = {}
if username:
password = url_password
netloc = parsed[1]
elif '@' in parsed[1]:
credentials, netloc = parsed[1].split('@', 1)
if ':' in credentials:
username, password = credentials.split(':', 1)
else:
username = credentials
password = ''
parsed = list(parsed)
parsed[1] = netloc
# reconstruct url without credentials
url = urlunparse(parsed)
if username and not force_basic_auth:
passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
passman.add_password(None, netloc, username, password)
# because we have put None at the start it will always
# use this username/password combination for urls
# for which `theurl` is a super-url
authhandler = urllib_request.HTTPBasicAuthHandler(passman)
digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
# create the AuthHandler
handlers.append(authhandler)
handlers.append(digest_authhandler)
elif username and force_basic_auth:
headers["Authorization"] = basic_auth_header(username, password)
else:
try:
rc = netrc.netrc(os.environ.get('NETRC'))
login = rc.authenticators(parsed[1])
except IOError:
login = None
if login:
username, _, password = login
if username and password:
headers["Authorization"] = basic_auth_header(username, password)
if not use_proxy:
proxyhandler = urllib_request.ProxyHandler({})
handlers.append(proxyhandler)
if HAS_SSLCONTEXT and not validate_certs:
# In 2.7.9, the default context validates certificates
context = SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.verify_mode = ssl.CERT_NONE
context.check_hostname = False
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key,
context=context))
elif client_cert:
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key))
# pre-2.6 versions of python cannot use the custom https
# handler, since the socket class is lacking create_connection.
# Some python builds lack HTTPS support.
if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
handlers.append(CustomHTTPSHandler)
handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs))
# add some nicer cookie handling
if cookies is not None:
handlers.append(urllib_request.HTTPCookieProcessor(cookies))
opener = urllib_request.build_opener(*handlers)
urllib_request.install_opener(opener)
data = to_bytes(data, nonstring='passthru')
if method:
if method.upper() not in ('OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT', 'PATCH'):
raise ConnectionError('invalid HTTP request method; %s' % method.upper())
request = RequestWithMethod(url, method.upper(), data)
else:
request = urllib_request.Request(url, data)
# add the custom agent header, to help prevent issues
# with sites that block the default urllib agent string
if http_agent:
request.add_header('User-agent', http_agent)
# Cache control
# Either we directly force a cache refresh
if force:
request.add_header('cache-control', 'no-cache')
# or we do it if the original is more recent than our copy
elif last_mod_time:
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
request.add_header('If-Modified-Since', tstamp)
# user defined headers now, which may override things we've set above
if headers:
if not isinstance(headers, dict):
raise ValueError("headers provided to fetch_url() must be a dict")
for header in headers:
request.add_header(header, headers[header])
urlopen_args = [request, None]
if sys.version_info >= (2, 6, 0):
# urlopen in python prior to 2.6.0 did not
# have a timeout parameter
urlopen_args.append(timeout)
r = urllib_request.urlopen(*urlopen_args)
return r
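# Editor's sketch of calling open_url() directly, outside the module
# environment (the URL and argument values are assumptions):
#   r = open_url('https://example.com/api', method='GET', timeout=10,
#                validate_certs=True, follow_redirects='safe')
#   body = r.read()
# Failures surface as NoSSLError, SSLValidationError, ConnectionError or the
# underlying urllib/socket errors; fetch_url() below converts those for modules.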
#
# Module-related functions
#
def basic_auth_header(username, password):
"""Takes a username and password and returns a byte string suitable for
using as value of an Authorization header to do basic auth.
"""
return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))
def url_argument_spec():
'''
Creates an argument spec that can be used with any module
that will be requesting content via urllib/urllib2
'''
return dict(
url=dict(),
force=dict(default='no', aliases=['thirsty'], type='bool'),
http_agent=dict(default='ansible-httpget'),
use_proxy=dict(default='yes', type='bool'),
validate_certs=dict(default='yes', type='bool'),
url_username=dict(required=False),
url_password=dict(required=False, no_log=True),
force_basic_auth=dict(required=False, type='bool', default='no'),
client_cert=dict(required=False, type='path', default=None),
client_key=dict(required=False, type='path', default=None),
)
def fetch_url(module, url, data=None, headers=None, method=None,
use_proxy=True, force=False, last_mod_time=None, timeout=10):
"""Sends a request via HTTP(S) or FTP (needs the module as parameter)
    :arg module: The AnsibleModule (used to get username, password, etc.; see below).
:arg url: The url to use.
:kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers.
:kwarg method: "POST", "PUT", etc.
:kwarg boolean use_proxy: Default: True
:kwarg boolean force: If True: Do not get a cached copy (Default: False)
:kwarg last_mod_time: Default: None
:kwarg int timeout: Default: 10
:returns: A tuple of (**response**, **info**). Use ``response.body()`` to read the data.
    The **info** contains the 'status' and other metadata. When an HTTPError (status >= 400)
    occurs, ``info['body']`` contains the error response data::
Example::
data={...}
resp, info = fetch_url(module,
"http://example.com",
                               data=module.jsonify(data),
                               headers={'Content-type': 'application/json'},
method="POST")
status_code = info["status"]
body = resp.read()
if status_code >= 400 :
body = info['body']
"""
if not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
# Get validate_certs from the module params
validate_certs = module.params.get('validate_certs', True)
username = module.params.get('url_username', '')
password = module.params.get('url_password', '')
http_agent = module.params.get('http_agent', 'ansible-httpget')
force_basic_auth = module.params.get('force_basic_auth', '')
follow_redirects = module.params.get('follow_redirects', 'urllib2')
client_cert = module.params.get('client_cert')
client_key = module.params.get('client_key')
cookies = cookiejar.LWPCookieJar()
r = None
info = dict(url=url)
try:
r = open_url(url, data=data, headers=headers, method=method,
use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
validate_certs=validate_certs, url_username=username,
url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
follow_redirects=follow_redirects, client_cert=client_cert,
client_key=client_key, cookies=cookies)
info.update(r.info())
# parse the cookies into a nice dictionary
cookie_dict = dict()
for cookie in cookies:
cookie_dict[cookie.name] = cookie.value
info['cookies'] = cookie_dict
# finally update the result with a message about the fetch
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
except NoSSLError as e:
distribution = get_distribution()
if distribution is not None and distribution.lower() == 'redhat':
module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e))
else:
module.fail_json(msg='%s' % to_native(e))
except (ConnectionError, ValueError) as e:
module.fail_json(msg=to_native(e))
except urllib_error.HTTPError as e:
try:
body = e.read()
except AttributeError:
body = ''
# Try to add exception info to the output but don't fail if we can't
try:
info.update(dict(**e.info()))
except:
pass
info.update({'msg': to_native(e), 'body': body, 'status': e.code})
except urllib_error.URLError as e:
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
except socket.error as e:
info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
except Exception as e:
info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
exception=traceback.format_exc())
return r, info
| bsd-3-clause |
recipy/recipy | integration_test/process.py | 1 | 1547 | """
Functions to execute commands via the operating system.
"""
# Copyright (c) 2016 University of Edinburgh.
from __future__ import (nested_scopes, generators, division,
absolute_import, with_statement,
print_function, unicode_literals)
from subprocess import call
from tempfile import TemporaryFile
def execute(command, stdout=None, stderr=None):
"""
Run a command via the operating system.
:param command: Command to run plus any arguments
:type command: list of str or unicode
:param stdout: File for standard output stream
:type stdout: file
:param stderr: File for standard error stream
:type stderr: file
:return: exit code
:rtype: int
:raises OSError: if there are problems running the command
"""
print((" ".join(command)))
return_code = call(command, stdout=stdout, stderr=stderr)
return return_code
def execute_and_capture(command):
"""
Run a command via the operating system and capture and return
standard output and standard error.
:param command: Command to run plus any arguments
:type command: list of str or unicode
:return: (exit code, standard output and error)
:rtype: (int, str or unicode)
:raises OSError: if there are problems running the command
"""
with TemporaryFile(mode='w+', suffix="log") as stdouterr:
result = execute(command, stdout=stdouterr, stderr=stdouterr)
stdouterr.seek(0)
log = stdouterr.readlines()
return (result, log)
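# A minimal usage sketch (the command shown is an assumption, not part of the
# test suite):
#
#   exit_code, log_lines = execute_and_capture(["python", "--version"])
#   assert exit_code == 0
#   print("".join(log_lines))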
| apache-2.0 |
adereis/avocado | selftests/unit/test_sysinfo.py | 1 | 3553 | import os
import sys
import tempfile
import shutil
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
from avocado.core import sysinfo
class SysinfoTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="sysinfo_unittest")
def testLoggablesEqual(self):
cmd1 = sysinfo.Command("ls -l")
cmd2 = sysinfo.Command("ls -l")
self.assertEqual(cmd1, cmd2)
file1 = sysinfo.Logfile("/proc/cpuinfo")
file2 = sysinfo.Logfile("/proc/cpuinfo")
self.assertEqual(file1, file2)
def testLoggablesNotEqual(self):
cmd1 = sysinfo.Command("ls -l")
cmd2 = sysinfo.Command("ls -la")
self.assertNotEqual(cmd1, cmd2)
file1 = sysinfo.Logfile("/proc/cpuinfo")
file2 = sysinfo.Logfile("/etc/fstab")
self.assertNotEqual(file1, file2)
def testLoggablesSet(self):
container = set()
cmd1 = sysinfo.Command("ls -l")
cmd2 = sysinfo.Command("ls -l")
cmd3 = sysinfo.Command("ps -ef")
cmd4 = sysinfo.Command("uname -a")
file1 = sysinfo.Command("/proc/cpuinfo")
file2 = sysinfo.Command("/etc/fstab")
file3 = sysinfo.Command("/etc/fstab")
file4 = sysinfo.Command("/etc/fstab")
# From the above 8 objects, only 5 are unique loggables.
container.add(cmd1)
container.add(cmd2)
container.add(cmd3)
container.add(cmd4)
container.add(file1)
container.add(file2)
container.add(file3)
container.add(file4)
self.assertEqual(len(container), 5)
def test_logger_job_hooks(self):
jobdir = os.path.join(self.tmpdir, 'job')
sysinfo_logger = sysinfo.SysInfo(basedir=jobdir)
sysinfo_logger.start_job_hook()
self.assertTrue(os.path.isdir(jobdir))
self.assertGreaterEqual(len(os.listdir(jobdir)), 1,
"Job does not have 'pre' dir")
job_predir = os.path.join(jobdir, 'pre')
self.assertTrue(os.path.isdir(job_predir))
sysinfo_logger.end_job_hook()
job_postdir = os.path.join(jobdir, 'post')
self.assertTrue(os.path.isdir(job_postdir))
def test_logger_test_hooks(self):
testdir = os.path.join(self.tmpdir, 'job', 'test1')
sysinfo_logger = sysinfo.SysInfo(basedir=testdir)
sysinfo_logger.start_test_hook()
self.assertTrue(os.path.isdir(testdir))
self.assertGreaterEqual(len(os.listdir(testdir)), 1,
"Test does not have 'pre' dir")
test_predir = os.path.join(testdir, 'pre')
self.assertTrue(os.path.isdir(test_predir))
# By default, there are no pre test files
self.assertEqual(len(os.listdir(test_predir)), 0,
"Test pre dir is not empty")
sysinfo_logger.end_test_hook()
self.assertGreaterEqual(len(os.listdir(testdir)), 2,
"Test does not have 'pre' dir")
job_postdir = os.path.join(testdir, 'post')
self.assertTrue(os.path.isdir(job_postdir))
# By default, there are no post test files
self.assertLess(len(os.listdir(job_postdir)), 3,
"Post dir can contain 0-2 files depending on whether "
"sys messages are obtainable or not:\n%s"
% os.listdir(job_postdir))
def tearDown(self):
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
pandaxcl/gyp | test/mac/gyptest-postbuild.py | 345 | 1708 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that postbuild steps work.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='postbuilds')
test.build('test.gyp', test.ALL, chdir='postbuilds')
# See comment in test/subdirectory/gyptest-subdir-default.py
if test.format == 'xcode':
chdir = 'postbuilds/subdirectory'
else:
chdir = 'postbuilds'
# Created by the postbuild scripts
test.built_file_must_exist('el.a_touch',
type=test.STATIC_LIB,
chdir='postbuilds')
test.built_file_must_exist('el.a_gyp_touch',
type=test.STATIC_LIB,
chdir='postbuilds')
test.built_file_must_exist('nest_el.a_touch',
type=test.STATIC_LIB,
chdir=chdir)
test.built_file_must_exist(
'dyna.framework/Versions/A/dyna_touch',
chdir='postbuilds')
test.built_file_must_exist(
'dyna.framework/Versions/A/dyna_gyp_touch',
chdir='postbuilds')
test.built_file_must_exist(
'nest_dyna.framework/Versions/A/nest_dyna_touch',
chdir=chdir)
test.built_file_must_exist('dyna_standalone.dylib_gyp_touch',
type=test.SHARED_LIB,
chdir='postbuilds')
test.built_file_must_exist('copied_file.txt', chdir='postbuilds')
test.built_file_must_exist('copied_file_2.txt', chdir=chdir)
test.pass_test()
| bsd-3-clause |
Troyhy/django-registration | registration/backends/simple/urls.py | 63 | 1259 | """
URLconf for registration and activation, using django-registration's
one-step backend.
If the default behavior of these views is acceptable to you, simply
use a line like this in your root URLconf to set up the default URLs
for registration::
(r'^accounts/', include('registration.backends.simple.urls')),
This will also automatically set up the views in
``django.contrib.auth`` at sensible default locations.
If you'd like to customize registration behavior, feel free to set up
your own URL patterns for these views instead.
"""
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from django.views.generic.base import TemplateView
from registration.backends.simple.views import RegistrationView
urlpatterns = patterns('',
url(r'^register/$',
RegistrationView.as_view(),
name='registration_register'),
url(r'^register/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
(r'', include('registration.auth_urls')),
)
| bsd-3-clause |
rande/python-shirka | shirka/responders/math.py | 1 | 1027 | # vim: set fileencoding=utf-8 :
from shirka.responders import Responder
from sympy.parsing.sympy_parser import parse_expr
import exceptions
from shirka.consumers import BaseTestCase
class MathResponder(Responder):
def name(self):
return 'math'
def generate(self, request):
"""
        usage: math <expression>
        Evaluates the arithmetic expression with sympy and returns the result
"""
try:
return "%s\n=> %s" % (request[5:], float(parse_expr(request[5:])))
except exceptions.Exception, e:
return "Parse error: %s" % e.message
class TestMathResponder(BaseTestCase):
def setUp(self):
self.responder = MathResponder()
def test_support(self):
self.assertTrue(self.responder.support(self.create_request("math")))
self.assertFalse(self.responder.support(self.create_request("fuu")))
def test_valid(self):
self.assertEquals(self.generate("math 1 + 1"), "1 + 1\n=> 2.0")
def test_on_start(self):
self.assertFalse(self.responder.on_start(False))
| mit |
barbarubra/Don-t-know-What-i-m-doing. | python/src/Lib/ctypes/_endian.py | 153 | 2041 | ######################################################################
# This file should be kept compatible with Python 2.3, see PEP 291. #
######################################################################
import sys
from ctypes import *
_array_type = type(c_int * 3)
def _other_endian(typ):
"""Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
attributes which contain the types, for more complicated types
only arrays are supported.
"""
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if type(typ) == _array_type:
return _other_endian(typ._type_) * typ._length_
raise TypeError("This type does not support other endian: %s" % typ)
class _swapped_meta(type(Structure)):
def __setattr__(self, attrname, value):
if attrname == "_fields_":
fields = []
for desc in value:
name = desc[0]
typ = desc[1]
rest = desc[2:]
fields.append((name, _other_endian(typ)) + rest)
value = fields
super(_swapped_meta, self).__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.
if sys.byteorder == "little":
_OTHER_ENDIAN = "__ctype_be__"
LittleEndianStructure = Structure
class BigEndianStructure(Structure):
"""Structure with big endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
elif sys.byteorder == "big":
_OTHER_ENDIAN = "__ctype_le__"
BigEndianStructure = Structure
class LittleEndianStructure(Structure):
"""Structure with little endian byte order"""
__metaclass__ = _swapped_meta
_swappedbytes_ = None
else:
raise RuntimeError("Invalid byteorder")
| apache-2.0 |
RebuiltBits/django-paypal | paypal/standard/ipn/south_migrations/0004_auto__chg_field_paypalipn_ipaddress.py | 12 | 14525 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'PayPalIPN.ipaddress'
db.alter_column(u'paypal_ipn', 'ipaddress', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True))
def backwards(self, orm):
# Changing field 'PayPalIPN.ipaddress'
db.alter_column(u'paypal_ipn', 'ipaddress', self.gf('django.db.models.fields.IPAddressField')(default='', max_length=15))
models = {
u'ipn.paypalipn': {
'Meta': {'object_name': 'PayPalIPN', 'db_table': "u'paypal_ipn'"},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_status': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'auth_status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'blank': 'True'}),
'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'case_id': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'charset': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'custom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'ipaddress': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_version': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'num_cart_items': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'option_name1': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'option_name2': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'outstanding_balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'parent_txn_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'payer_business_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_email': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'payer_id': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'payer_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'payment_cycle': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'payment_status': ('django.db.models.fields.CharField', [], {'max_length': '17', 'blank': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'pending_reason': ('django.db.models.fields.CharField', [], {'max_length': '14', 'blank': 'True'}),
'period1': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period3': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'period_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'product_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'profile_status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'protection_eligibility': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reason_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'reattempt': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'receiver_email': ('django.db.models.fields.EmailField', [], {'max_length': '127', 'blank': 'True'}),
'receiver_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'recur_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'recurring': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'recurring_payment_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'remaining_settle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'residence_country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'retry_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rp_invoice_id': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'settle_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'subscr_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_effective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscr_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'test_ipn': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'transaction_entity': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'transaction_subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'txn_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '19', 'blank': 'True'}),
'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'verify_sign': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['ipn'] | mit |
phalt/django | django/templatetags/tz.py | 57 | 5411 | from datetime import datetime, tzinfo
import pytz
from django.template import Library, Node, TemplateSyntaxError
from django.utils import six, timezone
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, six.string_types):
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
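# Illustrative usage note (not part of the original file): after `{% load tz %}`
# in a template, the filters above are applied to datetime values, e.g.
#     {{ value|localtime }}
#     {{ value|utc }}
#     {{ value|timezone:"Europe/Paris" }}
# The zone name is only an example; any name pytz recognises would work.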
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is ``None``, the default time zone is
used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
| bsd-3-clause |
patmcb/odoo | openerp/workflow/instance.py | 314 | 5594 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workitem
from openerp.workflow.helpers import Session
from openerp.workflow.helpers import Record
from openerp.workflow.workitem import WorkflowItem
class WorkflowInstance(object):
def __init__(self, session, record, values):
assert isinstance(session, Session)
assert isinstance(record, Record)
self.session = session
self.record = record
if not values:
values = {}
assert isinstance(values, dict)
self.instance = values
@classmethod
def create(cls, session, record, workflow_id):
assert isinstance(session, Session)
assert isinstance(record, Record)
assert isinstance(workflow_id, (int, long))
cr = session.cr
cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id,state) values (%s,%s,%s,%s,%s) RETURNING id', (record.model, record.id, session.uid, workflow_id, 'active'))
instance_id = cr.fetchone()[0]
cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (workflow_id,))
stack = []
activities = cr.dictfetchall()
for activity in activities:
WorkflowItem.create(session, record, activity, instance_id, stack)
cr.execute('SELECT * FROM wkf_instance WHERE id = %s', (instance_id,))
values = cr.dictfetchone()
wi = WorkflowInstance(session, record, values)
wi.update()
return wi
def delete(self):
self.session.cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))
def validate(self, signal, force_running=False):
assert isinstance(signal, basestring)
assert isinstance(force_running, bool)
cr = self.session.cr
cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
stack = []
for work_item_values in cr.dictfetchall():
wi = WorkflowItem(self.session, self.record, work_item_values)
wi.process(signal=signal, force_running=force_running, stack=stack)
# An action is returned
self._update_end()
return stack and stack[0] or False
def update(self):
cr = self.session.cr
cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
for work_item_values in cr.dictfetchall():
stack = []
WorkflowItem(self.session, self.record, work_item_values).process(stack=stack)
return self._update_end()
def _update_end(self):
cr = self.session.cr
instance_id = self.instance['id']
cr.execute('select wkf_id from wkf_instance where id=%s', (instance_id,))
wkf_id = cr.fetchone()[0]
cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
        ok = True
        for r in cr.fetchall():
            if r[0] != 'complete' or not r[1]:
                ok = False
                break
if ok:
cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
act_names = cr.fetchall()
cr.execute("update wkf_instance set state='complete' where id=%s", (instance_id,))
cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (instance_id,))
cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (instance_id,))
for cur_instance_id, cur_model_name, cur_record_id in cr.fetchall():
cur_record = Record(cur_model_name, cur_record_id)
for act_name in act_names:
WorkflowInstance(self.session, cur_record, {'id':cur_instance_id}).validate('subflow.%s' % act_name[0])
return ok
# Module-level convenience wrappers around WorkflowInstance. The constructor
# expects (session, record, values) and the instance methods read the instance
# id from those values, so the instances are built accordingly.
def create(session, record, workflow_id):
    return WorkflowInstance.create(session, record, workflow_id)
def delete(session, record):
    return WorkflowInstance(session, record, {}).delete()
def validate(session, record, instance_id, signal, force_running=False):
    return WorkflowInstance(session, record, {'id': instance_id}).validate(signal, force_running)
def update(session, record, instance_id):
    return WorkflowInstance(session, record, {'id': instance_id}).update()
def _update_end(session, record, instance_id):
    return WorkflowInstance(session, record, {'id': instance_id})._update_end()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
heisencoder/route-finder | third_party/lib/python/django/contrib/sessions/backends/cached_db.py | 103 | 2723 | """
Cached, database-backed sessions.
"""
import logging
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.cache import cache
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
KEY_PREFIX = "django.contrib.sessions.cached_db"
class SessionStore(DBStore):
"""
Implements cached, database backed sessions.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return KEY_PREFIX + self._get_or_create_session_key()
def load(self):
try:
data = cache.get(self.cache_key, None)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
data = None
if data is None:
# Duplicate DBStore.load, because we need to keep track
# of the expiry date to set it properly in the cache.
try:
s = Session.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
data = self.decode(s.session_data)
cache.set(self.cache_key, data,
self.get_expiry_age(expiry=s.expire_date))
except (Session.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
data = {}
return data
def exists(self, session_key):
if (KEY_PREFIX + session_key) in cache:
return True
return super(SessionStore, self).exists(session_key)
def save(self, must_create=False):
super(SessionStore, self).save(must_create)
cache.set(self.cache_key, self._session, self.get_expiry_age())
def delete(self, session_key=None):
super(SessionStore, self).delete(session_key)
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
cache.delete(KEY_PREFIX + session_key)
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete(self.session_key)
self.create()
# At bottom to avoid circular import
from django.contrib.sessions.models import Session
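# Illustrative note (not part of the original file): this backend is enabled
# through settings rather than imported directly. A minimal sketch, assuming a
# default cache is already configured, is:
#     SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"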
| mit |
GovReady/govready-q | loadtesting/web.py | 1 | 5269 | import logging
import requests
import structlog
import parsel
from random import sample
import re
from django.test import Client
from django.test import RequestFactory
from siteapp.urls import urlpatterns
from django.urls.exceptions import Resolver404 as Resolver404
from siteapp.models import *
from guidedmodules.models import *
logging.basicConfig()
logger = structlog.get_logger()
class WebClient():
session = None
response = None
selector = None
base_url = None
projects = None
comp_links = None
current_url = ''
def __init__(self, username, org_slug):
self.user = User.objects.get(username=username)
self.org = Organization.objects.get(slug=org_slug)
self.session = RequestFactory()
def _url(self, path):
# currently a no-op function, but for debug purposes it is useful to be able to change path handling in one spot
return path
def _use_page(self, response):
self.response = response
if (self.response.status_code != 200):
print("Got a non-200 response: {}".format(self.response.status_code))
if (self.response.status_code == 302):
print("proper URL: {}".format(self.response.url))
self.selector = parsel.Selector(text=response.content.decode('utf-8'))
self.html_debug(dir="/tmp/")
def _resolve(self, req):
# see https://stackoverflow.com/a/12011907
        # we might actually want to switch to the "test client" rather than RequestFactory
from django.contrib.messages.storage.fallback import FallbackStorage
setattr(req, 'session', 'session')
messages = FallbackStorage(req)
setattr(req, '_messages', messages)
self.current_url = req.path
for url in urlpatterns:
try:
match = url.resolve(req.path[1:])
if match:
self._use_page(match.func(req, *match.args, **match.kwargs))
return
except Resolver404:
logger.error(event="_resolve_path", msg="404: Failed to resolve url request path")
raise Exception("{} not resolved".format(req.path))
def load(self, path):
url = self._url(path)
print("GET on: <{}>".format(url))
req = self.session.get(url)
req.user = self.user
req.organization = self.org
self._resolve(req)
def form_fields(self, css):
return self.form_fields_by_ref(self.selector.css(css))
def form_fields_by_ref(self, form):
return dict([
(x.xpath('@name').get(), x.xpath('@value').get())
for x in form.css('input')
])
def form(self, css, fields={}):
form = self.selector.css(css)
return self.form_by_ref(form, fields)
def form_by_ref(self, form, fields={}):
base_fields = self.form_fields_by_ref(form)
for (key, val) in fields.items():
base_fields[key] = val
if 'action' in form.attrib:
url = self._url(form.attrib['action'])
else:
url = self.base_url
self.post(url, base_fields)
def post(self, url, fields):
print("POST on: <{}>".format(url))
req = self.session.post(url, fields)
req.user = self.user
req.organization = self.org
self._resolve(req)
def add_system(self):
portfolio = sample(list(self.user.portfolio_list()), 1)[0]
print("Adding project to portfolio: {} (#{})".format(portfolio.title, portfolio.id))
self.post("/store/govready-q-files-startpack/System-Description-Demo?portfolio={}".format(portfolio.id), {"organization":self.org.slug})
print(self.response.url)
def get_projects(self):
if not self.projects:
self.load('/projects')
self.projects = self.selector.css('a::attr("href")').re('^/projects/.*')
return self.projects
def load_project(self):
self.load(self.get_projects()[-1])
def start_section_for_proj(self, id):
url = Project.objects.get(id=id).get_absolute_url()
self.load(url)
all_forms = self.selector.css('form.start-task')
if len(all_forms) == 0:
return 0
form = all_forms[0]
self.form_by_ref(form)
return len(all_forms)
def load_task(self):
self.load_project()
task = sample([x.attrib['href'] for x in self.selector.css('.question-column [href^="/task"]')], 1)[0]
self.load(task)
# after loading a task
def pick_section(self):
section = sample(self.selector.css('.question'), 1)[0]
if len(section.css('form')) > 0:
self.form_by_ref(section.css('form')[0])
elif len(section.css('[href^="/tasks/"]')) > 0:
self.load(section.css('[href^="/tasks/"]').attrib['href'])
else:
print("something was missing here")
def fill_questions(self):
        # NOTE: unfinished stub - this locates the question form controls but
        # does not yet fill in or submit any values.
        sel = '.row .form-group .form-control'
        form = self.selector.css(sel)
def html_debug(self, filename="test.html", dir="siteapp/static/"):
with open(dir + filename, 'w') as file:
file.write(self.response.content.decode('utf-8'))
#return self.response.url
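# Illustrative usage sketch (not part of the original file). The username and
# organization slug below are assumptions and must match existing database records:
#     client = WebClient("some_user", "some-org")
#     client.load("/projects")
#     client.load_task()
#     client.pick_section()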
| gpl-3.0 |
glennhickey/toil-vg | src/toil_vg/test/test_vg.py | 1 | 49176 | import logging
import shlex
import shutil
import subprocess
import tempfile
import textwrap
import filecmp
import shutil
from contextlib import closing
from unittest import TestCase, skip
from urlparse import urlparse
from uuid import uuid4
import urllib2, gzip
import os, sys
import posixpath
import pytest
from toil_vg.iostore import IOStore
log = logging.getLogger(__name__)
class VGCGLTest(TestCase):
"""
Test toil-vg to make sure it is working correctly on some fixed input data.
"""
@classmethod
def setUpClass(cls):
super(VGCGLTest, cls).setUpClass()
# FIXME: pull up into common base class
logging.basicConfig(level=logging.INFO)
def _ci_input_path(self, filename):
"""
Get the URL from which an input file can be obtained.
"""
return 'https://{}.s3.amazonaws.com/{}/{}'.format(self.bucket_name, self.folder_name, filename)
def _download_input(self, filename, local_path = None):
# Where should we put this input file?
tgt = os.path.join(self.workdir, filename) if local_path is None else local_path
# And where does it come from?
url = self._ci_input_path(filename)
print(url)
with open(tgt, 'w') as f:
# Download the file from the URL
connection = urllib2.urlopen(url)
shutil.copyfileobj(connection, f)
def setUp(self):
        # Set this to True to poke around in the outstores for debug purposes
self.saveWorkDir = False
self.workdir = './toil-vgci_work' if self.saveWorkDir else tempfile.mkdtemp()
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
self.jobStoreLocal = '{}/local-testvg-{}'.format(self.workdir, uuid4())
# Determine if we can use Docker or not
self.containerType = os.environ.get('TOIL_VG_TEST_CONTAINER', 'Docker')
# input files all in same bucket folder, which is specified (only) here:
self.bucket_name = 'vg-data'
self.folder_name = 'toil_vg_ci'
self.base_command = ['toil-vg', 'run',
'--container', self.containerType,
'--realTimeLogging', '--logInfo', '--reads_per_chunk', '8000',
'--call_chunk_size', '20000',
'--gcsa_index_cores', '8',
'--alignment_cores', '4',
'--calling_cores', '4', '--vcfeval_cores', '4',
'--vcfeval_opts', ' --ref-overlap',
'--call_opts', '-E 0']
# default output store
self.local_outstore = os.path.join(self.workdir, 'toilvg-jenkinstest-outstore-{}'.format(uuid4()))
        # TODO: Since this might be on NFS, and since Toil can't clean up a job
        # store on NFS when the job finishes due to
        # https://github.com/DataBiosphere/toil/issues/2162, we need to pass
        # '--clean', 'never' to all our Toil workflows. The test harness will clean up after them.
def test_01_sim_small(self):
'''
This test uses simulated reads from the small dataset from vg, created as follows:
vg construct -r test/small/x.fa -v test/small/x.vcf.gz > small.vg
vg index -x small.xg small.vg
vg sim -x small.xg -l 100 -n 10000 -s 0 -e 0.001 -i 0.0001 > small_sim_reads
# Reads converted to small_sim_reads.fq with script setting qualities to B
(small.vcf.gz and small.fa.gz below are just x.vcf.gz and x.fa from input)
'''
self.sample_reads = self._ci_input_path('small_sim_reads.fq.gz')
self.test_vg_graph = self._ci_input_path('small.vg')
self.baseline = self._ci_input_path('small.vcf.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._run(self.base_command +
[self.jobStoreLocal, '1',
self.local_outstore, '--clean', 'never',
'--fastq', self.sample_reads,
'--graphs', self.test_vg_graph,
'--chroms', 'x', '--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, '--vcfeval_opts', ' --squash-ploidy'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('1', self.local_outstore, f1_threshold=0.95)
def test_02_sim_small_standalone(self):
'''
Same as above, but chain standalone tools instead of toil-vg run
'''
self.sample_reads = self._ci_input_path('small_sim_reads.fq.gz')
self.test_vg_graph = self._ci_input_path('small.vg')
self.baseline = self._ci_input_path('small.vcf.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--graphs', self.test_vg_graph, '--chroms', 'x',
'--gcsa_index_cores', '8',
'--realTimeLogging', '--logInfo', '--index_name', 'small', '--gcsa_index',
'--xg_index', '--snarls_index', '--id_ranges_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'map', self.jobStoreLocal, 'sample',
self.local_outstore,
'--xg_index', os.path.join(self.local_outstore, 'small.xg'),
'--gcsa_index', os.path.join(self.local_outstore, 'small.gcsa'),
'--container', self.containerType,
'--clean', 'never',
'--fastq', self.sample_reads,
'--alignment_cores', '8', '--reads_per_chunk', '8000',
'--realTimeLogging', '--logInfo'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'call', self.jobStoreLocal,
os.path.join(self.local_outstore, 'small.xg'), 'sample',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gams', os.path.join(self.local_outstore, 'sample_default.gam'),
'--chroms', 'x', '--call_chunk_size', '20000', '--calling_cores', '4',
'--realTimeLogging', '--logInfo'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'vcfeval', self.jobStoreLocal,
'--container', self.containerType,
'--call_vcf', os.path.join(self.local_outstore, 'sample.vcf.gz'),
'--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, self.local_outstore,
'--clean', 'never',
'--vcfeval_opts', ' --squash-ploidy', '--vcfeval_sample', 'sample',
'--realTimeLogging', '--logInfo'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput(None, self.local_outstore, f1_threshold=0.95)
def test_03_sim_small_mapeval_plots(self):
'''
Test running mapeval directly on the simulated reads and that plotting
produces output
'''
self.test_vg_graph = self._ci_input_path('small.vg')
self._download_input('NA12877.brca1.bam_1.fq.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
# check running mapeval on the vg graph
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--graphs', self.test_vg_graph, '--chroms', 'x',
'--gcsa_index_cores', '8',
'--realTimeLogging', '--logInfo', '--index_name', 'small', '--xg_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'sim', self.jobStoreLocal,
os.path.join(self.local_outstore, 'small.xg'), '2000',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam', '--sim_chunks', '5', '--maxCores', '8',
'--sim_opts', ' -l 150 -p 500 -v 50', '--seed', '1',
'--fastq', os.path.join(self.workdir, 'NA12877.brca1.bam_1.fq.gz')])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'mapeval', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--truth', os.path.join(self.local_outstore, 'true.pos'),
'--vg-graphs', self.test_vg_graph,
'--gam_input_reads', os.path.join(self.local_outstore, 'sim.gam'),
'--gam-names', 'vg', '--realTimeLogging', '--logInfo',
'--alignment_cores', '8', '--single-only', '--multipath-only',
'--maxCores', '8', '--fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertMapEvalOutput(self.local_outstore, 4000, ['vg-mp'], 0.9)
# check running plot on the mapeval output
os.unlink(os.path.join(self.local_outstore, 'plots/plot-pr.svg'))
os.unlink(os.path.join(self.local_outstore, 'plots/plot-qq.svg'))
os.unlink(os.path.join(self.local_outstore, 'plots/plot-roc.svg'))
self._run(['toil-vg', 'plot', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--position-stats', os.path.join(self.local_outstore, 'position.results.tsv'),
'--realTimeLogging', '--logInfo',
'--maxCores', '8'])
self._run(['toil', 'clean', self.jobStoreLocal])
self.assertGreater(os.path.getsize(os.path.join(self.local_outstore, 'plots/plot-pr.svg')), 0)
self.assertGreater(os.path.getsize(os.path.join(self.local_outstore, 'plots/plot-qq.svg')), 0)
self.assertGreater(os.path.getsize(os.path.join(self.local_outstore, 'plots/plot-roc.svg')), 0)
def test_04_sim_small_mapeval(self):
'''
Test running mapeval on some gams
'''
self.test_vg_graph = self._ci_input_path('small.vg')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._download_input('NA12877.brca1.bam_1.fq.gz')
self.baseline = self._ci_input_path('small.vcf.gz')
self.bed_regions = self._ci_input_path('small_regions.bed')
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--graphs', self.test_vg_graph, '--chroms', 'x',
'--gcsa_index_cores', '8',
'--realTimeLogging', '--logInfo', '--index_name', 'small', '--gcsa_index',
'--xg_index', '--snarls_index', '--id_ranges_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'sim', self.jobStoreLocal,
os.path.join(self.local_outstore, 'small.xg'), '2000',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam', '--sim_chunks', '5', '--maxCores', '8',
'--sim_opts', ' -l 150 -p 500 -v 50 -e 0.05 -i 0.01', '--seed', '1'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'map', self.jobStoreLocal, 'sample',
self.local_outstore,
'--xg_index', os.path.join(self.local_outstore, 'small.xg'),
'--gcsa_index', os.path.join(self.local_outstore, 'small.gcsa'),
'--container', self.containerType,
'--clean', 'never',
'--gam_input_reads', os.path.join(self.local_outstore, 'sim.gam'),
'--alignment_cores', '3', '--reads_per_chunk', '1000',
'--realTimeLogging', '--logInfo', '--interleaved'])
self._run(['toil', 'clean', self.jobStoreLocal])
# check running mapeval on the gams
self._run(['toil-vg', 'mapeval', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--truth', os.path.join(self.local_outstore, 'true.pos'),
'--index-bases', os.path.join(self.local_outstore, 'small'),
'--gam_input_reads', os.path.join(self.local_outstore, 'sim.gam'),
'--gams', os.path.join(self.local_outstore, 'sample_default.gam'),
'--gam-names', 'vg-pe', '--realTimeLogging', '--logInfo',
'--maxCores', '8', '--bwa', '--paired-only', '--fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertMapEvalOutput(self.local_outstore, 4000, ['vg-pe', 'bwa-mem-pe'], 0.9)
def test_05_sim_small_calleval(self):
'''
Test running calleval on some mapeval ouput
'''
self.test_vg_graph = self._ci_input_path('small.vg')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self.chrom_fa_nz = self._ci_input_path('small.fa')
self._download_input('NA12877.brca1.bam_1.fq.gz')
self.baseline = self._ci_input_path('small.vcf.gz')
self.bed_regions = self._ci_input_path('small_regions.bed')
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--graphs', self.test_vg_graph, '--chroms', 'x',
'--gcsa_index_cores', '8',
'--realTimeLogging', '--logInfo', '--index_name', 'small', '--gcsa_index',
'--xg_index', '--snarls_index', '--id_ranges_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'sim', self.jobStoreLocal,
os.path.join(self.local_outstore, 'small.xg'), '2000',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam', '--sim_chunks', '5', '--maxCores', '8',
'--sim_opts', ' -l 150 -p 500 -v 50 -e 0.005 -i 0.001', '--seed', '1'])
self._run(['toil', 'clean', self.jobStoreLocal])
# check running mapeval on the indexes
self._run(['toil-vg', 'mapeval', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam-input-xg', os.path.join(self.local_outstore, 'small.xg'),
'--index-bases', os.path.join(self.local_outstore, 'small'),
'--gam_input_reads', os.path.join(self.local_outstore, 'sim.gam'),
'--gam-names', 'vg', '--realTimeLogging', '--logInfo',
'--alignment_cores', '8',
'--maxCores', '8', '--bwa', '--fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertMapEvalOutput(self.local_outstore, 4000, ['vg', 'vg-pe', 'bwa-mem', 'bwa-mem-pe'], 0.8)
# check running calleval on the mapeval output
self._run(['toil-vg', 'calleval', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--chroms', 'x',
'--xg_paths', os.path.join(self.local_outstore, 'small.xg'),
os.path.join(self.local_outstore, 'small.xg'),
'--gams', os.path.join(self.local_outstore, 'aligned-vg_default.gam'),
os.path.join(self.local_outstore, 'aligned-vg-pe_default.gam'),
'--gam_names', 'vg', 'vg-pe',
'--realTimeLogging', '--logInfo',
'--vcfeval_fasta', self.chrom_fa_nz,
'--vcfeval_baseline', self.baseline,
'--vcfeval_bed_regions', self.bed_regions,
'--sample_name', '1',
'--calling_cores', '2',
'--call',
'--freebayes', '--force_outstore',
'--bams', os.path.join(self.local_outstore, 'bwa-mem.bam'),
os.path.join(self.local_outstore, 'bwa-mem-pe.bam'),
'--bam_names', 'bwa-mem', 'bwa-mem-pe',
'--happy', '--surject'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertCallEvalOutput(self.local_outstore, ['vg-call', 'vg-pe-call', 'bwa-mem-fb', 'bwa-mem-pe-fb',
'vg-pe-surject-fb', 'vg-surject-fb'], 0.02, 0.02)
def test_06_BRCA1_NA12877(self):
''' Test sample BRCA1 output, graph construction and use, and local file processing
'''
self._download_input('NA12877.brca1.bam_1.fq.gz')
self._download_input('NA12877.brca1.bam_2.fq.gz')
self._download_input('snp1kg-brca1.vg')
self._download_input('platinum_NA12877_BRCA1.vcf.gz.tbi')
self._download_input('platinum_NA12877_BRCA1.vcf.gz')
self._download_input('BRCA1.fa.gz')
self.sample_reads = os.path.join(self.workdir, 'NA12877.brca1.bam_1.fq.gz')
self.sample_reads2 = os.path.join(self.workdir, 'NA12877.brca1.bam_2.fq.gz')
self.test_vg_graph = os.path.join(self.workdir, 'snp1kg-brca1.vg')
self.baseline = os.path.join(self.workdir, 'platinum_NA12877_BRCA1.vcf.gz')
self.chrom_fa = os.path.join(self.workdir, 'BRCA1.fa.gz')
self.bam_reads = self._ci_input_path('NA12877.brca1.bam')
self._run(self.base_command +
[self.jobStoreLocal, 'NA12877',
self.local_outstore,
'--clean', 'never',
'--fastq', self.sample_reads, self.sample_reads2, '--graphs',
self.test_vg_graph, '--chroms', '17', '--index_name', 'index',
'--vcf_offsets', '43044293',
'--vcfeval_baseline', self.baseline, '--vcfeval_fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('NA12877', self.local_outstore, f1_threshold=0.45)
# repeat but with bam reads (and not recreating gcsa)
self._run(self.base_command +
[self.jobStoreLocal, 'NA12877',
self.local_outstore,
'--clean', 'never',
'--bam_input_reads', self.bam_reads, '--graphs',
self.test_vg_graph, '--chroms', '17',
'--reads_per_chunk', '10000',
'--gcsa_index', os.path.join(self.local_outstore, 'index.gcsa'),
# single_reads_chunk currently required for bam in jenkins test but can't figure out
# why, as can't reproduce problems on the command line
'--single_reads_chunk',
'--vcf_offsets', '43044293',
'--vcfeval_baseline', self.baseline, '--vcfeval_fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('NA12877', self.local_outstore, f1_threshold=0.45)
def test_07_BRCA1_BRCA2_NA12877(self):
        ''' Test the pipeline on two chromosomes at once, in this case both BRCA regions
'''
self._download_input('NA12877.brca1.brca2.bam.fq.gz')
self._download_input('snp1kg-brca1.vg')
self._download_input('snp1kg-brca2.vg')
self._download_input('platinum_NA12877_BRCA1_BRCA2.vcf.gz.tbi')
self._download_input('platinum_NA12877_BRCA1_BRCA2.vcf.gz')
self._download_input('BRCA1_BRCA2.fa.gz')
self.sample_reads = os.path.join(self.workdir, 'NA12877.brca1.brca2.bam.fq.gz')
self.test_vg_graph = os.path.join(self.workdir, 'snp1kg-brca1.vg')
self.test_vg_graph2 = os.path.join(self.workdir, 'snp1kg-brca2.vg')
self.baseline = os.path.join(self.workdir, 'platinum_NA12877_BRCA1_BRCA2.vcf.gz')
self.chrom_fa = os.path.join(self.workdir, 'BRCA1_BRCA2.fa.gz')
self._run(self.base_command +
[self.jobStoreLocal, 'NA12877',
self.local_outstore,
'--clean', 'never',
'--fastq', self.sample_reads, '--graphs',
self.test_vg_graph, self.test_vg_graph2, '--chroms', '17', '13',
'--vcf_offsets', '43044293', '32314860', '--interleaved',
'--single_reads_chunk', '--index_name', 'genome', '--realTimeStderr',
'--vcfeval_baseline', self.baseline, '--vcfeval_fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('NA12877', self.local_outstore, f1_threshold=0.70)
''' Test running vg call on one gam that contains multiple paths
'''
self.sample_gam = os.path.join(self.local_outstore, 'NA12877_both.gam')
with open(os.path.join(self.local_outstore, 'NA12877_13.gam')) as gam1, \
open(os.path.join(self.local_outstore, 'NA12877_17.gam')) as gam2, \
open(self.sample_gam, 'w') as merged_gam:
shutil.copyfileobj(gam1, merged_gam)
shutil.copyfileobj(gam2, merged_gam)
self.xg_index = os.path.join(self.local_outstore, 'genome.xg')
outstore = self.local_outstore + '.2'
self._run(['toil-vg', 'call', self.jobStoreLocal,
'--container', self.containerType,
'--clean', 'never',
self.xg_index, 'NA12877', outstore, '--gams', self.sample_gam,
'--chroms', '17', '13', '--vcf_offsets', '43044293', '32314860',
'--call_chunk_size', '23000', '--calling_cores', '4',
'--realTimeLogging', '--realTimeStderr', '--logInfo', '--call_opts', '-E 0'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'vcfeval', self.jobStoreLocal, '--clean', 'never',
'--call_vcf', os.path.join(outstore, 'NA12877.vcf.gz'),
'--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, outstore,
'--realTimeLogging', '--realTimeStderr', '--logInfo',
'--vcfeval_opts', ' --ref-overlap'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput(None, outstore, f1_threshold=0.70)
''' Test surject
'''
self._run(['toil-vg', 'surject', self.jobStoreLocal,
'--container', self.containerType,
'--clean', 'never', outstore,
'--reads_per_chunk', '20000',
'--gam_input_reads', self.sample_gam, '--paths', '13', '17',
'--xg_index', self.xg_index, '--alignment_cores', '2'])
self._run(['toil', 'clean', self.jobStoreLocal])
''' Test recall (using old caller)
'''
#self._run(['toil-vg', 'call', self.jobStoreLocal,
# '--container', self.containerType,
# '--clean', 'never', '--old_call',
# self.xg_index, 'NA12877', outstore, '--gams', self.sample_gam,
# '--chroms', '17', '13', '--vcf_offsets', '43044293', '32314860',
# '--call_chunk_size', '23000', '--calling_cores', '4',
# '--realTimeLogging', '--realTimeStderr', '--logInfo', '--call_opts', '-E 0', '--recall'])
#self._run(['toil', 'clean', self.jobStoreLocal])
#
#self._assertXREF('NA12877', outstore)
# check bam not empty
self.assertGreater(os.path.getsize(os.path.join(outstore, 'surject.bam')), 250000)
def test_08_sim_small_outstore(self):
'''
This is the same as test #1, but exercises --force_outstore.
'''
self.sample_reads = self._ci_input_path('small_sim_reads.fq.gz')
self.test_vg_graph = self._ci_input_path('small.vg')
self.baseline = self._ci_input_path('small.vcf.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._run(self.base_command +
[self.jobStoreLocal, '1',
self.local_outstore, '--clean', 'never', '--fastq', self.sample_reads,
'--graphs', self.test_vg_graph,
'--chroms', 'x', '--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, '--vcfeval_opts', ' --squash-ploidy',
'--force_outstore'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('1', self.local_outstore, f1_threshold=0.95)
def test_09_construct(self):
'''
Test that the output of toil-vg construct is somewhat reasonable
'''
in_vcf = self._ci_input_path('1kg_hg38-BRCA1.vcf.gz')
in_tbi = in_vcf + '.tbi'
in_fa = self._ci_input_path('17.fa')
in_region = '17:43044294-43125482'
out_name = 'snp1kg-BRCA1'
self._run(['toil-vg', 'construct', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--fasta', in_fa, '--vcf', in_vcf, '--regions', in_region,
'--out_name', out_name, '--pangenome', '--pos_control', 'HG00096',
'--neg_control', 'HG00096', '--sample_graph', 'HG00096',
'--filter_ceph', '--realTimeLogging', '--logInfo', '--validate',
'--haplo_sample', 'HG00096', '--min_af', '0.6', '--keep_vcfs'])
self._run(['toil', 'clean', self.jobStoreLocal])
# This is a fairly superficial check without adding machinery to read vg files
# Make sure output files exist and that they have expected relative sizes.
        # Todo: better correctness checks (maybe compare to hand-generated data?)
prev_vg_size = None
prev_vcf_size = None
for ext in ['', '_filter', '_minus_HG00096', '_HG00096_sample', '_HG00096_haplo', '_minaf_0.6']:
if ext and ext not in ['_HG00096_haplo', '_minaf_0.6', '_HG00096_sample']:
vcf_file = os.path.join(self.local_outstore, '{}-vcfs'.format(out_name), '1kg_hg38-BRCA1{}.vcf.gz'.format(ext))
assert os.path.isfile(vcf_file)
assert os.path.isfile(vcf_file + '.tbi')
# use the number of lines in vcf instead of size, since filtering cuts out a bunch
# of samples
with gzip.open(vcf_file) as vf:
vcf_size = len([line for line in vf])
if prev_vcf_size is not None:
assert vcf_size < prev_vcf_size
prev_vcf_size = vcf_size
vg_file = os.path.join(self.local_outstore, '{}{}.vg'.format(out_name, ext))
assert os.path.isfile(vg_file)
vg_size = os.path.getsize(vg_file)
if prev_vg_size is not None:
assert vg_size < prev_vg_size
prev_vg_size = vg_size
def test_10_sim_small_genotype(self):
'''
This is the same as test #1, but exercises --force_outstore and --genotype
'''
self.sample_reads = self._ci_input_path('small_sim_reads.fq.gz')
self.test_vg_graph = self._ci_input_path('small.vg')
self.baseline = self._ci_input_path('small.vcf.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._run(self.base_command +
[self.jobStoreLocal, '1',
self.local_outstore, '--clean', 'never',
'--fastq', self.sample_reads,
'--graphs', self.test_vg_graph,
'--chroms', 'x', '--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, '--vcfeval_opts', ' --squash-ploidy',
'--genotype', '--genotype_opts', ' -Q -A'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('1', self.local_outstore, f1_threshold=0.95)
def test_11_gbwt(self):
'''
Test that the gbwt gets constructed without crashing (but not much beyond that)
'''
in_vcf = self._ci_input_path('1kg_hg38-BRCA1.vcf.gz')
in_tbi = in_vcf + '.tbi'
in_fa = self._ci_input_path('17.fa')
in_region = '17:43044294-43125482'
# make a snp1kg graph with alt paths
self._run(['toil-vg', 'construct', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--fasta', in_fa, '--vcf', in_vcf, '--regions', in_region,
'--out_name', 'snp1kg-BRCA1', '--alt_paths', '--pangenome',
'--realTimeLogging', '--logInfo', '--realTimeStderr'])
self._run(['toil', 'clean', self.jobStoreLocal])
# check graph exists
vg_path = os.path.join(self.local_outstore, 'snp1kg-BRCA1.vg')
self.assertTrue(os.path.isfile(vg_path))
# make a gbwt and xg index
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--xg_index', '--graphs', vg_path, '--chroms', '17',
'--vcf_phasing', in_vcf, '--index_name', 'my_index',
'--gbwt_index', '--xg_index_cores', '4', '--xg_alts',
'--realTimeLogging', '--logInfo', '--realTimeStderr'])
self._run(['toil', 'clean', self.jobStoreLocal])
# check gbwt exists
gbwt_path = os.path.join(self.local_outstore, 'my_index.gbwt')
self.assertTrue(os.path.isfile(gbwt_path))
# check gbwt not empty
self.assertGreater(os.path.getsize(gbwt_path), 250000)
def test_12_sim_small_mapeval_minimap2(self):
'''
Test minimap2 support in mapeval
'''
self.test_vg_graph = self._ci_input_path('small.vg')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._download_input('NA12877.brca1.bam_1.fq.gz')
self.baseline = self._ci_input_path('small.vcf.gz')
self.bed_regions = self._ci_input_path('small_regions.bed')
# Index the graphs
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--graphs', self.test_vg_graph, '--chroms', 'x',
'--gcsa_index_cores', '8',
'--realTimeLogging', '--logInfo', '--index_name', 'small', '--gcsa_index',
'--xg_index', '--snarls_index', '--id_ranges_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
# Simulate the reads
self._run(['toil-vg', 'sim', self.jobStoreLocal,
os.path.join(self.local_outstore, 'small.xg'), '2000',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam', '--sim_chunks', '5', '--maxCores', '8',
'--sim_opts', ' -l 150 -p 500 -v 50 -e 0.05 -i 0.01', '--seed', '1'])
self._run(['toil', 'clean', self.jobStoreLocal])
# Run mapeval with minimap2
self._run(['toil-vg', 'mapeval', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam-input-xg', os.path.join(self.local_outstore, 'small.xg'),
'--index-bases', os.path.join(self.local_outstore, 'small'),
'--gam_input_reads', os.path.join(self.local_outstore, 'sim.gam'),
'--gam-names', 'vg', '--realTimeLogging', '--logInfo',
'--alignment_cores', '8', '--validate',
'--maxCores', '8', '--minimap2', '--fasta', self.chrom_fa])
self._run(['toil', 'clean', self.jobStoreLocal])
# TODO: Minimap2 is quite inaccurate on this tiny test. Maybe it only works well at larger scales?
        # Note that minimap2 is only run on paired-end reads here.
self._assertMapEvalOutput(self.local_outstore, 4000, ['vg', 'vg-pe', 'minimap2-pe'], 0.6)
def test_13_sim_small_mapeval_snarls(self):
'''
Test running mapeval with and without snarls
'''
self.test_vg_graph = self._ci_input_path('small.vg')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self._download_input('NA12877.brca1.bam_1.fq.gz')
self.baseline = self._ci_input_path('small.vcf.gz')
self.bed_regions = self._ci_input_path('small_regions.bed')
# Index the graphs
self._run(['toil-vg', 'index', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--graphs', self.test_vg_graph, '--chroms', 'x',
'--gcsa_index_cores', '8',
'--realTimeLogging', '--logInfo', '--index_name', 'small', '--gcsa_index',
'--xg_index', '--snarls_index', '--id_ranges_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
# Simulate the reads
self._run(['toil-vg', 'sim', self.jobStoreLocal,
os.path.join(self.local_outstore, 'small.xg'), '2000',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam', '--sim_chunks', '5', '--maxCores', '8',
'--sim_opts', ' -l 150 -p 500 -v 50 -e 0.05 -i 0.01', '--seed', '1', '--validate'])
self._run(['toil', 'clean', self.jobStoreLocal])
# Run mapeval with minimap2
self._run(['toil-vg', 'mapeval', self.jobStoreLocal,
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--gam-input-xg', os.path.join(self.local_outstore, 'small.xg'),
'--index-bases', os.path.join(self.local_outstore, 'small'),
'--gam_input_reads', os.path.join(self.local_outstore, 'sim.gam'),
'--gam-names', 'vg',
'--use-snarls', '--strip-snarls', '--multipath', '--paired-only',
'--realTimeLogging', '--logInfo',
'--alignment_cores', '8',
'--maxCores', '8'])
self._run(['toil', 'clean', self.jobStoreLocal])
# Note that snarls only matter for mpmap
self._assertMapEvalOutput(self.local_outstore, 4000, ['vg-pe', 'vg-mp-pe', 'vg-nosnarls-mp-pe'], 0.8)
def test_14_construct_naming_options(self):
"""
Make sure we don't get crashes when mixing and matching chromosomes with and without chr
prefix when using the appropriate options
"""
in_vcf = self._ci_input_path('1kg_hg38-BRCA1.vcf.gz')
in_tbi = in_vcf + '.tbi'
self._download_input('17.fa')
fa_path = os.path.join(self.workdir, '17.fa')
subprocess.check_call(['sed', '-i', 's/17/chr17/g', fa_path])
in_region = '17:43044294-43125482'
self._run(['toil-vg', 'construct', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--fasta', fa_path, '--vcf', in_vcf, '--regions', in_region,
'--out_name', 'snp1kg-BRCA1', '--pangenome', '--pos_control', 'HG00096',
'--realTimeLogging', '--logInfo', '--validate', '--add_chr_prefix', '--mask_ambiguous'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'construct', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--fasta', fa_path, '--vcf', in_vcf, '--regions', in_region, '--handle_svs',
'--out_name', 'snp1kg-BRCA1', '--pangenome', '--pos_control', 'HG00096',
'--realTimeLogging', '--logInfo', '--validate', '--remove_chr_prefix'])
self._run(['toil', 'clean', self.jobStoreLocal])
def test_15_sim_small_no_call_chunking(self):
'''
This is the same as test #1, but exercises --call_chunk_size 0
'''
self.sample_reads = self._ci_input_path('small_sim_reads.fq.gz')
self.test_vg_graph = self._ci_input_path('small.vg')
self.baseline = self._ci_input_path('small.vcf.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self.base_command[self.base_command.index('--call_chunk_size') + 1] = '0'
self._run(self.base_command +
[self.jobStoreLocal, '1',
self.local_outstore, '--clean', 'never',
'--fastq', self.sample_reads,
'--graphs', self.test_vg_graph,
'--chroms', 'x', '--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, '--vcfeval_opts', ' --squash-ploidy'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('1', self.local_outstore, f1_threshold=0.95)
def test_16_sv_genotyping(self):
'''
End to end SV genotyping on chr21 and chr22 of the HGSVC graph.
We subset reads and variants to chr21:5000000-6000000 and chr22:10000000-11000000
and the fastas are subset to chr21:1-7000000 and chr22:1-12000000
'''
fa_path = self._ci_input_path('hg38_chr21_22.fa.gz')
vcf_path = self._ci_input_path('HGSVC_regions.vcf.gz')
gam_reads_path = self._ci_input_path('HGSVC_regions.gam')
self._run(['toil-vg', 'construct', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--gcsa_index_cores', '8',
'--clean', 'never',
'--fasta', fa_path,
'--regions', 'chr21', 'chr22',
'--vcf', vcf_path,
'--out_name', 'HGSVC', '--pangenome', '--flat_alts', '--alt_path_gam_index', '--xg_index', '--gcsa_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'map', self.jobStoreLocal, 'HG00514',
self.local_outstore,
'--xg_index', os.path.join(self.local_outstore, 'HGSVC.xg'),
'--gcsa_index', os.path.join(self.local_outstore, 'HGSVC.gcsa'),
'--container', self.containerType,
'--clean', 'never',
'--gam_input_reads', gam_reads_path,
'--interleaved',
'--alignment_cores', '8',
'--single_reads_chunk',
'--realTimeLogging', '--logInfo'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'call', self.jobStoreLocal,
os.path.join(self.local_outstore, 'HGSVC.xg'),
'HG00514',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--chroms', 'chr21', 'chr22',
'--call_chunk_cores', '8',
'--recall_context', '200',
'--gams', os.path.join(self.local_outstore, 'HG00514_default.gam'),
'--alt_path_gam', os.path.join(self.local_outstore, 'HGSVC_alts.gam'),
'--genotype_vcf', vcf_path,
'--call_chunk_cores', '8'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'vcfeval', self.jobStoreLocal,
self.local_outstore,
'--sveval',
'--vcfeval_baseline', vcf_path,
'--vcfeval_sample', 'HG00514',
'--normalize',
'--vcfeval_fasta', fa_path,
'--call_vcf', os.path.join(self.local_outstore, 'HG00514.vcf.gz')])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertSVEvalOutput(self.local_outstore, f1_threshold=0.31)
def test_17_sim_small_pack_calling(self):
'''
        This is the same as test #1, but exercises --call_chunk_size 0 together with --pack and --recall
'''
self.sample_reads = self._ci_input_path('small_sim_reads.fq.gz')
self.test_vg_graph = self._ci_input_path('small.vg')
self.baseline = self._ci_input_path('small.vcf.gz')
self.chrom_fa = self._ci_input_path('small.fa.gz')
self.base_command[self.base_command.index('--call_chunk_size') + 1] = '0'
self._run(self.base_command +
[self.jobStoreLocal, '1',
self.local_outstore, '--clean', 'never',
'--fastq', self.sample_reads,
'--graphs', self.test_vg_graph,
'--chroms', 'x', '--vcfeval_baseline', self.baseline,
'--vcfeval_fasta', self.chrom_fa, '--vcfeval_opts', ' --squash-ploidy',
'--pack', '--recall'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertOutput('1', self.local_outstore, f1_threshold=0.95)
def test_18_pack_sv_genotyping(self):
'''
End to end SV genotyping on chr21 and chr22 of the HGSVC graph.
We subset reads and variants to chr21:5000000-6000000 and chr22:10000000-11000000
and the fastas are subset to chr21:1-7000000 and chr22:1-12000000
Exactly like test 16, but using the new --pack option
'''
fa_path = self._ci_input_path('hg38_chr21_22.fa.gz')
vcf_path = self._ci_input_path('HGSVC_regions.vcf.gz')
gam_reads_path = self._ci_input_path('HGSVC_regions.gam')
self._run(['toil-vg', 'construct', self.jobStoreLocal, self.local_outstore,
'--container', self.containerType,
'--gcsa_index_cores', '8',
'--clean', 'never',
'--fasta', fa_path,
'--regions', 'chr21', 'chr22',
'--vcf', vcf_path,
'--out_name', 'HGSVC', '--pangenome', '--flat_alts', '--alt_path_gam_index', '--xg_index',
'--gcsa_index', '--snarls_index'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'map', self.jobStoreLocal, 'HG00514',
self.local_outstore,
'--xg_index', os.path.join(self.local_outstore, 'HGSVC.xg'),
'--gcsa_index', os.path.join(self.local_outstore, 'HGSVC.gcsa'),
'--container', self.containerType,
'--clean', 'never',
'--gam_input_reads', gam_reads_path,
'--interleaved',
'--alignment_cores', '8',
'--single_reads_chunk',
'--realTimeLogging', '--logInfo'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'call', self.jobStoreLocal,
os.path.join(self.local_outstore, 'HGSVC.xg'),
'HG00514',
self.local_outstore,
'--container', self.containerType,
'--clean', 'never',
'--chroms', 'chr21', 'chr22',
'--call_chunk_cores', '8',
'--recall_context', '200',
'--gams', os.path.join(self.local_outstore, 'HG00514_default.gam'),
'--alt_path_gam', os.path.join(self.local_outstore, 'HGSVC_alts.gam'),
'--genotype_vcf', vcf_path,
'--call_chunk_cores', '8', '--pack',
'--snarls', os.path.join(self.local_outstore, 'HGSVC.snarls'),
'--realTimeLogging', '--logInfo'])
self._run(['toil', 'clean', self.jobStoreLocal])
self._run(['toil-vg', 'vcfeval', self.jobStoreLocal,
self.local_outstore,
'--sveval',
'--vcfeval_baseline', vcf_path,
'--vcfeval_sample', 'HG00514',
'--normalize',
'--vcfeval_fasta', fa_path,
'--call_vcf', os.path.join(self.local_outstore, 'HG00514.vcf.gz')])
self._run(['toil', 'clean', self.jobStoreLocal])
self._assertSVEvalOutput(self.local_outstore, f1_threshold=0.31)
def _run(self, args):
log.info('Running %r', args)
subprocess.check_call(args)
def _assertOutput(self, sample_name, outstore, f1_threshold=0.90):
# grab the f1.txt file
local_f1 = os.path.join(self.workdir, 'f1.txt')
io_store = IOStore.get(outstore)
f1_path = 'vcfeval_output_f1.txt'
if sample_name:
f1_path = sample_name + '_' + f1_path
io_store.read_input_file(f1_path, local_f1)
with open(local_f1) as f1_file:
f1_score = float(f1_file.readline().strip())
print f1_score
self.assertGreaterEqual(f1_score, f1_threshold)
def _assertMapEvalOutput(self, outstore, test_count, names, acc_threshold):
with open(os.path.join(outstore, 'stats.tsv')) as stats:
table = [line for line in stats]
headers = set()
for row in table[1:]:
toks = row.split()
if len(toks) != 6:
raise RuntimeError("toks should have 6 entries but is {}".format(toks))
self.assertEqual(len(toks), 6)
name, count, acc, auc, qqr, maxf1 = \
toks[0], int(toks[1]), float(toks[2]), float(toks[3]), float(toks[4]), float(toks[5])
headers.add(name)
self.assertEqual(count, test_count)
self.assertGreater(acc, acc_threshold)
# todo: look into this more.
#self.assertTrue(auc > 0.9)
#self.assertTrue(qqur > -10)
self.assertEqual(headers, set(names))
# make sure plots get drawn
self.assertGreater(os.path.getsize(os.path.join(outstore, 'plots/plot-pr.svg')), 0)
self.assertGreater(os.path.getsize(os.path.join(outstore, 'plots/plot-qq.svg')), 0)
def _assertCallEvalOutput(self, outstore, names, f1_threshold, happy_snp_f1_threshold=-1,
happy_indel_f1_threshold=-1):
with open(os.path.join(outstore, 'calleval_stats.tsv')) as stats:
table = [line for line in stats]
headers = set()
for row in table:
toks = row.split()
self.assertEqual(len(toks), 4)
name, f1, happy_snp_f1, happy_indel_f1, = toks[0], toks[1], toks[2], toks[3]
headers.add(name)
self.assertGreaterEqual(float(f1), f1_threshold)
self.assertGreaterEqual(float(happy_snp_f1), happy_snp_f1_threshold)
self.assertGreaterEqual(float(happy_indel_f1), happy_indel_f1_threshold)
self.assertEqual(headers, set(names))
def _assertXREF(self, sample, outstore):
var_count = 0
xref_count = 0
with gzip.open(os.path.join(outstore, '{}.vcf.gz'.format(sample)), 'rb') as vcf_file:
for line in vcf_file:
if line.strip() and line.strip()[0] != '#':
# TODO: clean up call (and/or recall options) to get rid of this hack
if len(line.split()) > 4 and line.split()[4] != '.':
var_count += 1
if 'XREF' in line:
xref_count += 1
self.assertEqual(var_count, xref_count)
def _assertSVEvalOutput(self, outstore, f1_threshold):
with open(os.path.join(outstore, 'sv_accuracy.tsv')) as sv_stats:
table = [line.split() for line in sv_stats]
self.assertEqual(table[0][-1], 'F1')
self.assertGreater(float(table[1][-1]), f1_threshold)
def tearDown(self):
if not self.saveWorkDir:
shutil.rmtree(self.workdir)
subprocess.check_call(['toil', 'clean', self.jobStoreLocal])
| apache-2.0 |
simonwydooghe/ansible | lib/ansible/module_utils/facts/other/ohai.py | 232 | 2307 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts.collector import BaseFactCollector
class OhaiFactCollector(BaseFactCollector):
'''This is a subclass of Facts for including information gathered from Ohai.'''
name = 'ohai'
_fact_ids = set()
def __init__(self, collectors=None, namespace=None):
namespace = PrefixFactNamespace(namespace_name='ohai',
prefix='ohai_')
super(OhaiFactCollector, self).__init__(collectors=collectors,
namespace=namespace)
def find_ohai(self, module):
ohai_path = module.get_bin_path('ohai')
return ohai_path
    def run_ohai(self, module, ohai_path):
rc, out, err = module.run_command(ohai_path)
return rc, out, err
def get_ohai_output(self, module):
ohai_path = self.find_ohai(module)
if not ohai_path:
return None
rc, out, err = self.run_ohai(module, ohai_path)
if rc != 0:
return None
return out
def collect(self, module=None, collected_facts=None):
ohai_facts = {}
if not module:
return ohai_facts
ohai_output = self.get_ohai_output(module)
if ohai_output is None:
return ohai_facts
try:
ohai_facts = json.loads(ohai_output)
except Exception:
# FIXME: useful error, logging, something...
pass
return ohai_facts
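# Illustrative sketch (not part of the original file): the collector is driven
# with an AnsibleModule instance, roughly as follows ('module' is assumed to be
# a valid AnsibleModule):
#     collector = OhaiFactCollector()
#     ohai_facts = collector.collect(module=module)
# collect() returns an empty dict when no module is given or ohai is unavailable.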
| gpl-3.0 |
kmcginn/advent-of-code | 2017/day05/jump.py | 1 | 2214 | """
from: http://adventofcode.com/2017/day/5
--- Day 5: A Maze of Twisty Trampolines, All Alike ---
An urgent interrupt arrives from the CPU: it's trapped in a maze of jump instructions, and it would
like assistance from any programs with spare cycles to help find the exit.
The message includes a list of the offsets for each jump. Jumps are relative: -1 moves to the
previous instruction, and 2 skips the next one. Start at the first instruction in the list. The goal
is to follow the jumps until one leads outside the list.
In addition, these instructions are a little strange; after each jump, the offset of that
instruction increases by 1. So, if you come across an offset of 3, you would move three instructions
forward, but change it to a 4 for the next time it is encountered.
For example, consider the following list of jump offsets:
0
3
0
1
-3
Positive jumps ("forward") move downward; negative jumps move upward. For legibility in this
example, these offset values will be written all on one line, with the current instruction marked in
parentheses. The following steps would be taken before an exit is found:
(0) 3 0 1 -3 - before we have taken any steps.
(1) 3 0 1 -3 - jump with offset 0 (that is, don't jump at all). Fortunately, the instruction is
then incremented to 1.
2 (3) 0 1 -3 - step forward because of the instruction we just modified. The first instruction
is incremented again, now to 2.
2 4 0 1 (-3) - jump all the way to the end; leave a 4 behind.
2 (4) 0 1 -2 - go back to where we just were; increment -3 to -2.
2 5 0 1 -2 - jump 4 steps forward, escaping the maze.
In this example, the exit is reached in 5 steps.
How many steps does it take to reach the exit?
"""
def main():
"""Solve the problem!"""
maze = []
jump_count = 0
# import the maze
with open("input.txt") as input_file:
for line in input_file:
maze.append(int(line))
index = 0
    while 0 <= index < len(maze):  # leaving the list at either end is an escape
jump_value = maze[index]
maze[index] = maze[index] + 1
index = index + jump_value
jump_count = jump_count + 1
print(jump_count)
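# A small sketch of the same jump logic factored into a helper, so the worked
# example from the puzzle text above can be checked without an input.txt file;
# count_jumps() is added for illustration only and is not used by main().
def count_jumps(offsets):
    """Return how many jumps it takes to escape the given offset list."""
    maze = list(offsets)
    index = 0
    jumps = 0
    while 0 <= index < len(maze):
        offset = maze[index]
        maze[index] += 1  # each offset increases by 1 after it is used
        index += offset
        jumps += 1
    return jumps
# The example maze from the docstring escapes in 5 steps.
assert count_jumps([0, 3, 0, 1, -3]) == 5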
if __name__ == "__main__":
main()
| mit |
rschaad/owlmonitor | docs/conf.py | 1 | 9173 | # -*- coding: utf-8 -*-
#
# OWL Monitor documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 29 14:54:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OWL Monitor'
copyright = u'2016, RSC'
author = u'RSC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'OWLMonitordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OWLMonitor.tex', u'OWL Monitor Documentation',
u'RSC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'owlmonitor', u'OWL Monitor Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OWLMonitor', u'OWL Monitor Documentation',
author, 'OWLMonitor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
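# A minimal sketch of how the commented-out hooks near the top are typically
# filled in if API documentation is wanted later; the '..' path and module
# layout are assumptions about this project, so the lines stay commented out.
#
# sys.path.insert(0, os.path.abspath('..'))
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.todo']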
| mit |
davidzchen/jsonnet | case_studies/micromanage/cmds.py | 6 | 2679 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
# E.g. wrap Simon's cat as 'Simon'"'"'s cat', which is safe to embed in a bash command line.
def escape(s):
return "'%s'" % s.replace("'", "'\"'\"'")
def file_glob(given_glob, to, prefix):
dirs = []
files = []
lp = len(prefix)
for f in glob.glob(given_glob):
if os.path.isdir(f):
more_files = file_glob('%s/*' % f, to, prefix)
files += more_files
else:
files.append((f, to + f[lp:]))
return files
def compile_command_to_bash(cmd):
if isinstance(cmd, basestring):
return [cmd]
elif cmd['kind'] == 'LiteralFile':
return [
'echo -n %s > %s' % (escape(cmd['content']), escape(cmd['to'])),
'chmod -v %s %s' % (cmd['filePermissions'], escape(cmd['to'])),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(cmd['to'])),
]
elif cmd['kind'] == 'CopyFile':
files = file_glob(cmd['from'], cmd['to'], os.path.dirname(cmd['from']))
dirs = set([os.path.dirname(f[1]) for f in files]) - {cmd['to']}
lines = []
for d in dirs:
lines += [
'mkdir -v -p %s' % escape(d),
'chmod -v %s %s' % (cmd['dirPermissions'], escape(d)),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(d)),
]
for f in files:
            with open(f[0], "r") as stream:
content = stream.read()
lines += [
'echo -n %s > %s' % (escape(content), escape(f[1])),
'chmod -v %s %s' % (cmd['filePermissions'], escape(f[1])),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(f[1])),
]
return lines
elif cmd['kind'] == 'EnsureDir':
return [
'mkdir -v -p %s' % escape(cmd['dir']),
'chmod -v %s %s' % (cmd['dirPermissions'], escape(cmd['dir'])),
'chown -v %s.%s %s' % (cmd['owner'], cmd['group'], escape(cmd['dir'])),
]
else:
raise RuntimeError('Did not recognize image command kind: ' + cmd['kind'])
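# A minimal usage sketch: compiling one hypothetical command dict into bash.
# The keys mirror what compile_command_to_bash() expects for 'LiteralFile';
# the concrete values are made up for illustration.
if __name__ == '__main__':
    example_cmd = {
        'kind': 'LiteralFile',
        'content': "It's alive!\n",
        'to': '/etc/motd',
        'filePermissions': '644',
        'owner': 'root',
        'group': 'root',
    }
    # Prints three lines: an echo redirect, a chmod and a chown for /etc/motd.
    for line in compile_command_to_bash(example_cmd):
        print(line)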
| apache-2.0 |
abma/pr-downloader | scripts/update-repos.py | 2 | 1875 | #!/usr/bin/python3
# updates repos.gz
import gzip
import os
wwwroot = "/home/packages/www/repos.springrts.com"
prefix = "http://repos.springrts.com/"
streamer = "/home/packages/bin/Streamer"
repos = {
"main": "http://packages.springrts.com"
}
assert(os.path.isfile(streamer))
def SetupStreamer(repodir):
streamercgi = os.path.join(repodir, "streamer.cgi")
if os.path.islink(streamercgi):
if os.readlink(streamercgi) == streamer:
return
print("Deleted invalid link %s" %(streamercgi))
os.unlink(streamercgi)
print("Created symlink %s -> %s" %(streamercgi, streamer))
os.symlink(streamer, streamercgi)
def GetWWWRepos(wwwroot):
for repo in os.listdir(wwwroot):
absname = os.path.join(wwwroot, repo)
if not os.path.isdir(absname):
continue
assert(repo not in repos)
repos[repo] = prefix + repo
return repos
def ParseRepos(reposgz):
	res = {}
	with gzip.open(reposgz, "rb") as f:
		for line in f.readlines():
			name, url, _, _ = line.decode().split(",", 4)
			res[name] = url
	return res
def WriteRepos(reposgz, repos):
with gzip.open(reposgz, "wb") as f:
for reponame, prefix in sorted(repos.items()):
line = "%s,%s,,\n" %(reponame, prefix)
f.write(line.encode())
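# The format the two functions above agree on is one "name,url,," line per
# repository inside the gzip file, so a round trip through a scratch path
# (hypothetical, kept as a comment because this script touches real paths) is:
#
#   WriteRepos("/tmp/repos.gz", {"main": "http://packages.springrts.com"})
#   assert ParseRepos("/tmp/repos.gz") == {"main": "http://packages.springrts.com"}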
def SetupRepos(wwwroot, wwwrepos):
for reponame in wwwrepos:
absdir = os.path.join(wwwroot, reponame)
if os.path.isdir(absdir):
SetupStreamer(absdir)
def ReposEqual(repoa, repob):
for a, b in (repoa, repob), (repob, repoa):
for k, v in a.items():
if not k in b:
return False
if v != b[k]:
return False
return True
reposgz = os.path.join(wwwroot, "repos.gz")
repos.update(GetWWWRepos(wwwroot))
SetupRepos(wwwroot, repos)
currentrepos = ParseRepos(reposgz)
if not ReposEqual(repos, currentrepos):
print("Updating %s" %(reposgz))
WriteRepos(reposgz, repos)
assert(ReposEqual(ParseRepos(reposgz), repos))
| gpl-2.0 |
Maximilian-Reuter/SickRage-1 | lib/hachoir_parser/program/prc.py | 86 | 2766 | """
PRC (Palm resource) parser.
Author: Sebastien Ponce
Creation date: 29 october 2008
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
UInt16, UInt32, TimestampMac32,
String, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
class PRCHeader(FieldSet):
static_size = 78*8
def createFields(self):
yield String(self, "name", 32, "Name")
yield UInt16(self, "flags", "Flags")
yield UInt16(self, "version", "Version")
yield TimestampMac32(self, "create_time", "Creation time")
yield TimestampMac32(self, "mod_time", "Modification time")
yield TimestampMac32(self, "backup_time", "Backup time")
yield UInt32(self, "mod_num", "mod num")
yield UInt32(self, "app_info", "app info")
yield UInt32(self, "sort_info", "sort info")
yield UInt32(self, "type", "type")
yield UInt32(self, "id", "id")
yield UInt32(self, "unique_id_seed", "unique_id_seed")
yield UInt32(self, "next_record_list", "next_record_list")
yield UInt16(self, "num_records", "num_records")
class ResourceHeader(FieldSet):
static_size = 10*8
def createFields(self):
yield String(self, "name", 4, "Name of the resource")
yield UInt16(self, "flags", "ID number of the resource")
yield UInt32(self, "offset", "Pointer to the resource data")
def createDescription(self):
return "Resource Header (%s)" % self["name"]
class PRCFile(Parser):
PARSER_TAGS = {
"id": "prc",
"category": "program",
"file_ext": ("prc", ""),
"min_size": ResourceHeader.static_size, # At least one program header
"mime": (
u"application/x-pilot-prc",
u"application/x-palmpilot"),
"description": "Palm Resource File"
}
endian = BIG_ENDIAN
def validate(self):
# FIXME: Implement the validation function!
return False
def createFields(self):
# Parse header and program headers
yield PRCHeader(self, "header", "Header")
lens = []
firstOne = True
poff = 0
for index in xrange(self["header/num_records"].value):
r = ResourceHeader(self, "res_header[]")
if firstOne:
firstOne = False
else:
lens.append(r["offset"].value - poff)
poff = r["offset"].value
yield r
lens.append(self.size/8 - poff)
yield UInt16(self, "placeholder", "Place holder bytes")
for i in range(len(lens)):
yield RawBytes(self, "res[]", lens[i], '"'+self["res_header["+str(i)+"]/name"].value+"\" Resource")
def createDescription(self):
return "Palm Resource file"
| gpl-3.0 |
tiagofrepereira2012/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 52 | 69800 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
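    # In detail: with three of the four labels at 0 the model is pushed toward
    # p = 0.25, so the mean cross entropy is (-log(0.25) - 3*log(0.75)) / 4
    # = (1.386 + 0.863) / 4 ~= 0.562, the value asserted above.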
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
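    # In detail: three of the four labels are 1, so the base rate is 0.75 and
    # the centered bias converges to logit(0.75) = ln(0.75/0.25) = ln(3)
    # ~= 1.0986, the value checked above.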
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
self.assertEqual(50, step_counter.steps)
else:
# Occasionally, training stops when global_step == 101, due to a race
# condition.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
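# A distilled usage pattern for the estimators exercised above (illustrative
# sketch only; `my_input_fn` and the step counts are placeholders):
#
#   cols = [feature_column.real_valued_column('x')]
#   regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
#       linear_feature_columns=cols,
#       dnn_feature_columns=cols,
#       dnn_hidden_units=[3, 3])
#   regressor.fit(input_fn=my_input_fn, steps=100)
#   scores = regressor.evaluate(input_fn=my_input_fn, steps=1)
#   predictions = list(regressor.predict_scores(
#       input_fn=my_input_fn, as_iterable=True))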
| apache-2.0 |
lehmannro/pootle | external_apps/profiles/utils.py | 21 | 1467 | """
Utility functions for retrieving and generating forms for the
site-specific user profile model specified in the
``AUTH_PROFILE_MODULE`` setting.
"""
from django import forms
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.db.models import get_model
def get_profile_model():
"""
Return the model class for the currently-active user profile
model, as defined by the ``AUTH_PROFILE_MODULE`` setting. If that
setting is missing, raise
``django.contrib.auth.models.SiteProfileNotAvailable``.
"""
if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
(not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
profile_mod = get_model(*settings.AUTH_PROFILE_MODULE.split('.'))
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod
def get_profile_form():
"""
Return a form class (a subclass of the default ``ModelForm``)
suitable for creating/editing instances of the site-specific user
profile model, as defined by the ``AUTH_PROFILE_MODULE``
setting. If that setting is missing, raise
``django.contrib.auth.models.SiteProfileNotAvailable``.
"""
profile_mod = get_profile_model()
class _ProfileForm(forms.ModelForm):
class Meta:
model = profile_mod
exclude = ('user',) # User will be filled in by the view.
return _ProfileForm
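# Illustrative sketch of how these helpers are typically used in a view
# (assumes settings.AUTH_PROFILE_MODULE points at a profile model such as
# 'accounts.UserProfile'; `request` is the usual Django request object):
#
#   ProfileForm = get_profile_form()
#   form = ProfileForm(request.POST or None,
#                      instance=request.user.get_profile())
#   if form.is_valid():
#       form.save()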
| gpl-2.0 |
chuijiaolianying/robotx | robotx/core/__init__.py | 4 | 3209 | """Deal with the general concerns of the command-line tool set.
This module is the command entry point.
"""
import sys
import inspect
import optparse
from robotx.core.base import BaseCommand
from robotx.utils.misc import walk_modules
from robotx.core.exceptions import UsageError
def _iter_command_classes(module_name):
"""generator for getting cmd"""
for module in walk_modules(module_name):
for obj in vars(module).itervalues():
if inspect.isclass(obj) and \
issubclass(obj, BaseCommand) and \
obj.__module__ == module.__name__:
yield obj
def _get_commands_from_module(module):
"""As subject~"""
d = {}
for cmd in _iter_command_classes(module):
cmdname = cmd.__module__.split('.')[-1]
d[cmdname] = cmd()
return d
def _get_commands_dict(settings):
"""As subject~"""
cmds = _get_commands_from_module('robotx.core.commands')
return cmds
def _pop_command_name(argv):
"""As subject~"""
i = 0
for arg in argv[1:]:
if not arg.startswith('-'):
del argv[i]
return arg
i += 1
def _print_header(settings):
"""As subject~"""
print "RobotX - Toolset for Automation Development with Robot Framework\n"
def _print_commands(settings):
"""As subject~"""
_print_header(settings)
print "Usage:"
print " robotx <sub-command> [options] [args]\n"
print "Available sub-commands:"
cmds = _get_commands_dict(settings)
for cmdname, cmdclass in sorted(cmds.iteritems()):
print " %-16s %s" % (cmdname, cmdclass.short_desc())
print '\nUse "robotx <sub-command> -h" to see more info about sub-command'
def _print_unknown_command(settings, cmdname):
"""As subject~"""
_print_header(settings)
print "Unknown command: %s\n" % cmdname
print 'Use "robotx" to see available commands'
def _run_print_help(parser, func, *a, **kw):
"""As subject~"""
try:
func(*a, **kw)
except UsageError, e:
if str(e):
parser.error(str(e))
if e.print_help:
parser.print_help()
sys.exit(2)
def execute(argv=None, settings=None):
"""The entry point of command line"""
if argv is None:
argv = sys.argv
if settings is None:
settings = 'nothing'
cmds = _get_commands_dict(settings)
cmdname = _pop_command_name(argv)
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
conflict_handler='resolve')
if not cmdname:
_print_commands(settings)
sys.exit(0)
elif cmdname not in cmds:
_print_unknown_command(settings, cmdname)
sys.exit(2)
cmd = cmds[cmdname]
parser.usage = "robotx %s %s" % (cmdname, cmd.syntax())
parser.description = cmd.long_desc()
cmd.add_options(parser)
opts, args = parser.parse_args(args=argv[1:])
_run_print_help(parser, cmd.process_options, args, opts)
_run_print_help(parser, _run_command, cmd, args, opts)
sys.exit(cmd.exitcode)
def _run_command(cmd, args, opts):
"""As subject~"""
cmd.run(args, opts)
if __name__ == '__main__':
execute()
| mit |
arifgursel/pyglet | examples/programming_guide/hello_world.py | 30 | 2206 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet
window = pyglet.window.Window()
label = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=36,
x=window.width//2, y=window.height//2,
anchor_x='center', anchor_y='center')
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.app.run()
| bsd-3-clause |
MiltosD/CEF-ELRC | misc/tools/generateDS-2.7a/tests/literal1.py | 12 | 6179 | #from out2_sup import *
import out2_sup as model_
rootObj = model_.rootTag(
comments=[
model_.comments(
content_ = [
model_.MixedContainer(1, 0, "", "1. This is a "),
model_.MixedContainer(2, 2, "emp", "foolish"),
model_.MixedContainer(1, 0, "", " comment. It is "),
model_.MixedContainer(2, 2, "emp", "really"),
model_.MixedContainer(1, 0, "", " important."),
],
valueOf_ = """1. This is a comment. It is important.""",
),
model_.comments(
content_ = [
model_.MixedContainer(1, 0, "", "2. This is a "),
model_.MixedContainer(2, 2, "emp", "silly"),
model_.MixedContainer(1, 0, "", " comment. It is "),
model_.MixedContainer(2, 2, "emp", "very"),
model_.MixedContainer(1, 0, "", " significant."),
],
valueOf_ = """2. This is a comment. It is significant.""",
),
],
person=[
model_.person(
ratio = 3.200000,
id = 1,
value = "abcd",
name='Alberta',
interest=[
'gardening',
'reading',
],
category=5,
agent=[
],
promoter=[
],
),
model_.person(
id = 2,
name='Bernardo',
interest=[
'programming',
],
category=0,
agent=[
model_.agent(
firstname='Darren',
lastname='Diddly',
priority=4.500000,
info=model_.info(
rating = 5.330000,
type_ = 321,
name = "Albert Abasinian",
),
),
],
promoter=[
],
),
model_.person(
id = 3,
name='Charlie',
interest=[
'people',
'cats',
'dogs',
],
category=8,
agent=[
],
promoter=[
model_.booster(
firstname='David',
lastname='Donaldson',
other_value=[
],
type_=[
],
client_handler=[
],
),
model_.booster(
firstname='Edward',
lastname='Eddleberry',
other_value=[
],
type_=[
],
client_handler=[
],
),
],
),
model_.person(
id = 4,
),
],
programmer=[
model_.programmer(
language = "python",
area = "xml",
id = 2,
name='Charles Carlson',
interest=[
'programming',
],
category=2233,
agent=[
model_.agent(
firstname='Ernest',
lastname='Echo',
priority=3.800000,
info=model_.info(
rating = 5.330000,
type_ = 321,
name = "George Gregory",
),
),
],
promoter=[
],
description='A very happy programmer',
email='[email protected]',
elposint=14,
elnonposint=0,
elnegint=-12,
elnonnegint=4,
eldate='2005-04-26',
eltoken='aa bb cc dd',
elshort=123,
ellong=13241234123412341234,
elparam=model_.param(
semantic = "a big semantic",
name = "Davy",
id = "id001",
valueOf_ = """""",
),
),
],
python_programmer=[
model_.python_programmer(
nick_name = "davy",
language = "python",
area = "xml",
vegetable = "tomato",
fruit = "peach",
ratio = 8.700000,
id = 232,
value = "abcd",
name='Darrel Dawson',
interest=[
'hang gliding',
],
category=3344,
agent=[
model_.agent(
firstname='Harvey',
lastname='Hippolite',
priority=5.200000,
info=model_.info(
rating = 6.550000,
type_ = 543,
name = "Harvey Hippolite",
),
),
],
promoter=[
],
description='An object-orientated programmer',
email='[email protected]',
favorite_editor='jed',
),
],
java_programmer=[
model_.java_programmer(
nick_name = "davy",
language = "python",
area = "xml",
vegetable = "tomato",
fruit = "peach",
ratio = 8.700000,
id = 232,
value = "abcd",
name='Darrel Dawson',
interest=[
'hang gliding',
],
category=3344,
agent=[
model_.agent(
firstname='Harvey',
lastname='Hippolite',
priority=5.200000,
info=model_.info(
rating = 6.550000,
type_ = 543,
name = "Harvey Hippolite",
),
),
],
promoter=[
],
description='An object-orientated programmer',
email='[email protected]',
favorite_editor='jed',
),
],
)
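# The literal above only rebuilds the object tree; with classes generated by
# generateDS it can usually be written back out as XML, assuming the generated
# out2_sup module exposes the customary export() method:
#
#   import sys
#   rootObj.export(sys.stdout, 0)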
| bsd-3-clause |
sergiorua/libcloud | libcloud/compute/ssh.py | 10 | 16719 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wraps multiple ways to communicate over SSH.
"""
have_paramiko = False
try:
import paramiko
have_paramiko = True
except ImportError:
pass
# Depending on your version of Paramiko, it may cause a deprecation
# warning on Python 2.6.
# Ref: https://bugs.launchpad.net/paramiko/+bug/392973
import os
import time
import subprocess
import logging
import warnings
from os.path import split as psplit
from os.path import join as pjoin
from libcloud.utils.logging import ExtraLogFormatter
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import b
__all__ = [
'BaseSSHClient',
'ParamikoSSHClient',
'ShellOutSSHClient',
'SSHCommandTimeoutError'
]
class SSHCommandTimeoutError(Exception):
"""
Exception which is raised when an SSH command times out.
"""
def __init__(self, cmd, timeout):
self.cmd = cmd
self.timeout = timeout
        message = 'Command didn\'t finish in %s seconds' % (timeout)
        self.message = message
        super(SSHCommandTimeoutError, self).__init__(message)
def __repr__(self):
        return ('<SSHCommandTimeoutError: cmd="%s", timeout=%s>' %
                (self.cmd, self.timeout))
def __str__(self):
return self.message
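# Example of how this exception surfaces (sketch; `client` stands for an
# already-connected ParamikoSSHClient, the timeout value is arbitrary, and
# handle_timeout is a placeholder handler):
#
#   try:
#       stdout, stderr, status = client.run('./long_job.sh', timeout=30)
#   except SSHCommandTimeoutError as e:
#       handle_timeout(e.cmd, e.timeout)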
class BaseSSHClient(object):
"""
Base class representing a connection over SSH/SCP to a remote node.
"""
def __init__(self, hostname, port=22, username='root', password=None,
key=None, key_files=None, timeout=None):
"""
:type hostname: ``str``
:keyword hostname: Hostname or IP address to connect to.
:type port: ``int``
:keyword port: TCP port to communicate on, defaults to 22.
:type username: ``str``
:keyword username: Username to use, defaults to root.
:type password: ``str``
:keyword password: Password to authenticate with or a password used
to unlock a private key if a password protected key
is used.
:param key: Deprecated in favor of ``key_files`` argument.
:type key_files: ``str`` or ``list``
:keyword key_files: A list of paths to the private key files to use.
"""
if key is not None:
message = ('You are using deprecated "key" argument which has '
'been replaced with "key_files" argument')
warnings.warn(message, DeprecationWarning)
# key_files has precedent
key_files = key if not key_files else key_files
self.hostname = hostname
self.port = port
self.username = username
self.password = password
self.key_files = key_files
self.timeout = timeout
def connect(self):
"""
Connect to the remote node over SSH.
:return: True if the connection has been successfully established,
False otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'connect not implemented for this ssh client')
def put(self, path, contents=None, chmod=None, mode='w'):
"""
Upload a file to the remote node.
:type path: ``str``
:keyword path: File path on the remote node.
:type contents: ``str``
:keyword contents: File Contents.
:type chmod: ``int``
:keyword chmod: chmod file to this after creation.
:type mode: ``str``
:keyword mode: Mode in which the file is opened.
:return: Full path to the location where a file has been saved.
:rtype: ``str``
"""
raise NotImplementedError(
'put not implemented for this ssh client')
def delete(self, path):
"""
Delete/Unlink a file on the remote node.
:type path: ``str``
:keyword path: File path on the remote node.
:return: True if the file has been successfully deleted, False
otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'delete not implemented for this ssh client')
def run(self, cmd):
"""
Run a command on a remote node.
:type cmd: ``str``
:keyword cmd: Command to run.
:return ``list`` of [stdout, stderr, exit_status]
"""
raise NotImplementedError(
'run not implemented for this ssh client')
def close(self):
"""
Shutdown connection to the remote node.
:return: True if the connection has been successfully closed, False
otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'close not implemented for this ssh client')
def _get_and_setup_logger(self):
logger = logging.getLogger('libcloud.compute.ssh')
path = os.getenv('LIBCLOUD_DEBUG')
if path:
handler = logging.FileHandler(path)
handler.setFormatter(ExtraLogFormatter())
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
class ParamikoSSHClient(BaseSSHClient):
"""
A SSH Client powered by Paramiko.
"""
# Maximum number of bytes to read at once from a socket
CHUNK_SIZE = 1024
# How long to sleep while waiting for command to finish
SLEEP_DELAY = 1.5
def __init__(self, hostname, port=22, username='root', password=None,
key=None, key_files=None, key_material=None, timeout=None):
"""
Authentication is always attempted in the following order:
- The key passed in (if key is provided)
        - Any key we can find through an SSH agent (only if neither a password
          nor a key is provided)
        - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (only if
          neither a password nor a key is provided)
        - Plain username/password auth, if a password was given
"""
if key_files and key_material:
raise ValueError(('key_files and key_material arguments are '
'mutually exclusive'))
super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port,
username=username,
password=password,
key=key,
key_files=key_files,
timeout=timeout)
self.key_material = key_material
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.logger = self._get_and_setup_logger()
def connect(self):
conninfo = {'hostname': self.hostname,
'port': self.port,
'username': self.username,
'allow_agent': False,
'look_for_keys': False}
if self.password:
conninfo['password'] = self.password
if self.key_files:
conninfo['key_filename'] = self.key_files
if self.key_material:
conninfo['pkey'] = self._get_pkey_object(key=self.key_material)
if not self.password and not (self.key_files or self.key_material):
conninfo['allow_agent'] = True
conninfo['look_for_keys'] = True
if self.timeout:
conninfo['timeout'] = self.timeout
extra = {'_hostname': self.hostname, '_port': self.port,
'_username': self.username, '_timeout': self.timeout}
self.logger.debug('Connecting to server', extra=extra)
self.client.connect(**conninfo)
return True
def put(self, path, contents=None, chmod=None, mode='w'):
extra = {'_path': path, '_mode': mode, '_chmod': chmod}
self.logger.debug('Uploading file', extra=extra)
sftp = self.client.open_sftp()
# less than ideal, but we need to mkdir stuff otherwise file() fails
head, tail = psplit(path)
if path[0] == "/":
sftp.chdir("/")
else:
# Relative path - start from a home directory (~)
sftp.chdir('.')
for part in head.split("/"):
if part != "":
try:
sftp.mkdir(part)
except IOError:
# so, there doesn't seem to be a way to
# catch EEXIST consistently *sigh*
pass
sftp.chdir(part)
cwd = sftp.getcwd()
ak = sftp.file(tail, mode=mode)
ak.write(contents)
if chmod is not None:
ak.chmod(chmod)
ak.close()
sftp.close()
if path[0] == '/':
file_path = path
else:
file_path = pjoin(cwd, path)
return file_path
def delete(self, path):
extra = {'_path': path}
self.logger.debug('Deleting file', extra=extra)
sftp = self.client.open_sftp()
sftp.unlink(path)
sftp.close()
return True
def run(self, cmd, timeout=None):
"""
Note: This function is based on paramiko's exec_command()
method.
:param timeout: How long to wait (in seconds) for the command to
finish (optional).
:type timeout: ``float``
"""
extra = {'_cmd': cmd}
self.logger.debug('Executing command', extra=extra)
# Use the system default buffer size
bufsize = -1
transport = self.client.get_transport()
chan = transport.open_session()
start_time = time.time()
chan.exec_command(cmd)
stdout = StringIO()
stderr = StringIO()
# Create a stdin file and immediately close it to prevent any
# interactive script from hanging the process.
stdin = chan.makefile('wb', bufsize)
stdin.close()
# Receive all the output
# Note #1: This is used instead of chan.makefile approach to prevent
# buffering issues and hanging if the executed command produces a lot
# of output.
#
# Note #2: If you are going to remove "ready" checks inside the loop
# you are going to have a bad time. Trying to consume from a channel
        # which is not ready will block indefinitely.
exit_status_ready = chan.exit_status_ready()
while not exit_status_ready:
current_time = time.time()
elapsed_time = (current_time - start_time)
if timeout and (elapsed_time > timeout):
# TODO: Is this the right way to clean up?
chan.close()
raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout)
if chan.recv_ready():
data = chan.recv(self.CHUNK_SIZE)
while data:
stdout.write(b(data).decode('utf-8'))
ready = chan.recv_ready()
if not ready:
break
data = chan.recv(self.CHUNK_SIZE)
if chan.recv_stderr_ready():
data = chan.recv_stderr(self.CHUNK_SIZE)
while data:
stderr.write(b(data).decode('utf-8'))
ready = chan.recv_stderr_ready()
if not ready:
break
data = chan.recv_stderr(self.CHUNK_SIZE)
            # We need to check the exit status here, because the command could
            # print some output and exit during the sleep below.
exit_status_ready = chan.exit_status_ready()
if exit_status_ready:
break
# Short sleep to prevent busy waiting
time.sleep(self.SLEEP_DELAY)
# Receive the exit status code of the command we ran.
status = chan.recv_exit_status()
stdout = stdout.getvalue()
stderr = stderr.getvalue()
extra = {'_status': status, '_stdout': stdout, '_stderr': stderr}
self.logger.debug('Command finished', extra=extra)
return [stdout, stderr, status]
def close(self):
self.logger.debug('Closing server connection')
self.client.close()
return True
def _get_pkey_object(self, key):
"""
Try to detect private key type and return paramiko.PKey object.
"""
for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]:
try:
key = cls.from_private_key(StringIO(key))
except paramiko.ssh_exception.SSHException:
# Invalid key, try other key type
pass
else:
return key
msg = 'Invalid or unsupported key type'
raise paramiko.ssh_exception.SSHException(msg)
class ShellOutSSHClient(BaseSSHClient):
"""
This client shells out to "ssh" binary to run commands on the remote
server.
Note: This client should not be used in production.
"""
def __init__(self, hostname, port=22, username='root', password=None,
key=None, key_files=None, timeout=None):
super(ShellOutSSHClient, self).__init__(hostname=hostname,
port=port, username=username,
password=password,
key=key,
key_files=key_files,
timeout=timeout)
if self.password:
raise ValueError('ShellOutSSHClient only supports key auth')
child = subprocess.Popen(['ssh'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
child.communicate()
if child.returncode == 127:
raise ValueError('ssh client is not available')
self.logger = self._get_and_setup_logger()
def connect(self):
"""
        This client doesn't support persistent connections; it establishes a new
        connection every time the "run" method is called.
"""
return True
def run(self, cmd):
return self._run_remote_shell_command([cmd])
def put(self, path, contents=None, chmod=None, mode='w'):
if mode == 'w':
redirect = '>'
elif mode == 'a':
redirect = '>>'
else:
raise ValueError('Invalid mode: ' + mode)
cmd = ['echo "%s" %s %s' % (contents, redirect, path)]
self._run_remote_shell_command(cmd)
return path
def delete(self, path):
cmd = ['rm', '-rf', path]
self._run_remote_shell_command(cmd)
return True
def close(self):
return True
def _get_base_ssh_command(self):
cmd = ['ssh']
if self.key_files:
cmd += ['-i', self.key_files]
if self.timeout:
cmd += ['-oConnectTimeout=%s' % (self.timeout)]
cmd += ['%s@%s' % (self.username, self.hostname)]
return cmd
def _run_remote_shell_command(self, cmd):
"""
Run a command on a remote server.
:param cmd: Command to run.
:type cmd: ``list`` of ``str``
:return: Command stdout, stderr and status code.
:rtype: ``tuple``
"""
base_cmd = self._get_base_ssh_command()
full_cmd = base_cmd + [' '.join(cmd)]
self.logger.debug('Executing command: "%s"' % (' '.join(full_cmd)))
child = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = child.communicate()
return (stdout, stderr, child.returncode)
class MockSSHClient(BaseSSHClient):
pass
SSHClient = ParamikoSSHClient
if not have_paramiko:
SSHClient = MockSSHClient
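if __name__ == '__main__':
    # Minimal smoke test for ParamikoSSHClient. The host, username and key
    # path below are placeholders and must be replaced with real values;
    # nothing here runs on import.
    demo_client = ParamikoSSHClient(hostname='203.0.113.10', username='root',
                                    key_files='/home/user/.ssh/id_rsa',
                                    timeout=10)
    demo_client.connect()
    stdout, stderr, status = demo_client.run('uname -a')
    demo_client.put('hello.txt', contents='hello from libcloud\n', mode='w')
    demo_client.delete('hello.txt')
    demo_client.close()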
| apache-2.0 |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/curses/has_key.py | 154 | 5627 |
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
_capability_names = {
_curses.KEY_A1: 'ka1',
_curses.KEY_A3: 'ka3',
_curses.KEY_B2: 'kb2',
_curses.KEY_BACKSPACE: 'kbs',
_curses.KEY_BEG: 'kbeg',
_curses.KEY_BTAB: 'kcbt',
_curses.KEY_C1: 'kc1',
_curses.KEY_C3: 'kc3',
_curses.KEY_CANCEL: 'kcan',
_curses.KEY_CATAB: 'ktbc',
_curses.KEY_CLEAR: 'kclr',
_curses.KEY_CLOSE: 'kclo',
_curses.KEY_COMMAND: 'kcmd',
_curses.KEY_COPY: 'kcpy',
_curses.KEY_CREATE: 'kcrt',
_curses.KEY_CTAB: 'kctab',
_curses.KEY_DC: 'kdch1',
_curses.KEY_DL: 'kdl1',
_curses.KEY_DOWN: 'kcud1',
_curses.KEY_EIC: 'krmir',
_curses.KEY_END: 'kend',
_curses.KEY_ENTER: 'kent',
_curses.KEY_EOL: 'kel',
_curses.KEY_EOS: 'ked',
_curses.KEY_EXIT: 'kext',
_curses.KEY_F0: 'kf0',
_curses.KEY_F1: 'kf1',
_curses.KEY_F10: 'kf10',
_curses.KEY_F11: 'kf11',
_curses.KEY_F12: 'kf12',
_curses.KEY_F13: 'kf13',
_curses.KEY_F14: 'kf14',
_curses.KEY_F15: 'kf15',
_curses.KEY_F16: 'kf16',
_curses.KEY_F17: 'kf17',
_curses.KEY_F18: 'kf18',
_curses.KEY_F19: 'kf19',
_curses.KEY_F2: 'kf2',
_curses.KEY_F20: 'kf20',
_curses.KEY_F21: 'kf21',
_curses.KEY_F22: 'kf22',
_curses.KEY_F23: 'kf23',
_curses.KEY_F24: 'kf24',
_curses.KEY_F25: 'kf25',
_curses.KEY_F26: 'kf26',
_curses.KEY_F27: 'kf27',
_curses.KEY_F28: 'kf28',
_curses.KEY_F29: 'kf29',
_curses.KEY_F3: 'kf3',
_curses.KEY_F30: 'kf30',
_curses.KEY_F31: 'kf31',
_curses.KEY_F32: 'kf32',
_curses.KEY_F33: 'kf33',
_curses.KEY_F34: 'kf34',
_curses.KEY_F35: 'kf35',
_curses.KEY_F36: 'kf36',
_curses.KEY_F37: 'kf37',
_curses.KEY_F38: 'kf38',
_curses.KEY_F39: 'kf39',
_curses.KEY_F4: 'kf4',
_curses.KEY_F40: 'kf40',
_curses.KEY_F41: 'kf41',
_curses.KEY_F42: 'kf42',
_curses.KEY_F43: 'kf43',
_curses.KEY_F44: 'kf44',
_curses.KEY_F45: 'kf45',
_curses.KEY_F46: 'kf46',
_curses.KEY_F47: 'kf47',
_curses.KEY_F48: 'kf48',
_curses.KEY_F49: 'kf49',
_curses.KEY_F5: 'kf5',
_curses.KEY_F50: 'kf50',
_curses.KEY_F51: 'kf51',
_curses.KEY_F52: 'kf52',
_curses.KEY_F53: 'kf53',
_curses.KEY_F54: 'kf54',
_curses.KEY_F55: 'kf55',
_curses.KEY_F56: 'kf56',
_curses.KEY_F57: 'kf57',
_curses.KEY_F58: 'kf58',
_curses.KEY_F59: 'kf59',
_curses.KEY_F6: 'kf6',
_curses.KEY_F60: 'kf60',
_curses.KEY_F61: 'kf61',
_curses.KEY_F62: 'kf62',
_curses.KEY_F63: 'kf63',
_curses.KEY_F7: 'kf7',
_curses.KEY_F8: 'kf8',
_curses.KEY_F9: 'kf9',
_curses.KEY_FIND: 'kfnd',
_curses.KEY_HELP: 'khlp',
_curses.KEY_HOME: 'khome',
_curses.KEY_IC: 'kich1',
_curses.KEY_IL: 'kil1',
_curses.KEY_LEFT: 'kcub1',
_curses.KEY_LL: 'kll',
_curses.KEY_MARK: 'kmrk',
_curses.KEY_MESSAGE: 'kmsg',
_curses.KEY_MOVE: 'kmov',
_curses.KEY_NEXT: 'knxt',
_curses.KEY_NPAGE: 'knp',
_curses.KEY_OPEN: 'kopn',
_curses.KEY_OPTIONS: 'kopt',
_curses.KEY_PPAGE: 'kpp',
_curses.KEY_PREVIOUS: 'kprv',
_curses.KEY_PRINT: 'kprt',
_curses.KEY_REDO: 'krdo',
_curses.KEY_REFERENCE: 'kref',
_curses.KEY_REFRESH: 'krfr',
_curses.KEY_REPLACE: 'krpl',
_curses.KEY_RESTART: 'krst',
_curses.KEY_RESUME: 'kres',
_curses.KEY_RIGHT: 'kcuf1',
_curses.KEY_SAVE: 'ksav',
_curses.KEY_SBEG: 'kBEG',
_curses.KEY_SCANCEL: 'kCAN',
_curses.KEY_SCOMMAND: 'kCMD',
_curses.KEY_SCOPY: 'kCPY',
_curses.KEY_SCREATE: 'kCRT',
_curses.KEY_SDC: 'kDC',
_curses.KEY_SDL: 'kDL',
_curses.KEY_SELECT: 'kslt',
_curses.KEY_SEND: 'kEND',
_curses.KEY_SEOL: 'kEOL',
_curses.KEY_SEXIT: 'kEXT',
_curses.KEY_SF: 'kind',
_curses.KEY_SFIND: 'kFND',
_curses.KEY_SHELP: 'kHLP',
_curses.KEY_SHOME: 'kHOM',
_curses.KEY_SIC: 'kIC',
_curses.KEY_SLEFT: 'kLFT',
_curses.KEY_SMESSAGE: 'kMSG',
_curses.KEY_SMOVE: 'kMOV',
_curses.KEY_SNEXT: 'kNXT',
_curses.KEY_SOPTIONS: 'kOPT',
_curses.KEY_SPREVIOUS: 'kPRV',
_curses.KEY_SPRINT: 'kPRT',
_curses.KEY_SR: 'kri',
_curses.KEY_SREDO: 'kRDO',
_curses.KEY_SREPLACE: 'kRPL',
_curses.KEY_SRIGHT: 'kRIT',
_curses.KEY_SRSUME: 'kRES',
_curses.KEY_SSAVE: 'kSAV',
_curses.KEY_SSUSPEND: 'kSPD',
_curses.KEY_STAB: 'khts',
_curses.KEY_SUNDO: 'kUND',
_curses.KEY_SUSPEND: 'kspd',
_curses.KEY_UNDO: 'kund',
_curses.KEY_UP: 'kcuu1'
}
def has_key(ch):
if isinstance(ch, str):
ch = ord(ch)
# Figure out the correct capability name for the keycode.
capability_name = _capability_names.get(ch)
if capability_name is None:
return False
    # Check the current terminal description for that capability;
    # if present, return True, else return False.
if _curses.tigetstr( capability_name ):
return True
else:
return False
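# Example (sketch): query the fallback inside a curses session with either an
# integer keycode or a one-character string.
#
#   import curses
#   curses.initscr()
#   try:
#       f1_supported = has_key(curses.KEY_F1)
#   finally:
#       curses.endwin()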
if __name__ == '__main__':
# Compare the output of this implementation and the ncurses has_key,
# on platforms where has_key is already available
try:
L = []
_curses.initscr()
for key in _capability_names.keys():
            system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append( 'Mismatch for key %s, system=%i, Python=%i'
% (_curses.keyname( key ), system, python) )
finally:
_curses.endwin()
for i in L: print i
| gpl-3.0 |
shanzhenren/ClusType | src/algorithm.py | 1 | 10630 | from collections import defaultdict
from operator import itemgetter
from math import log, sqrt
import random as rn
import time
from numpy import * # install numpy
from scipy import * # install scipy
from numpy.linalg import norm
import numpy.linalg as npl
from scipy.sparse import *
import scipy.sparse.linalg as spsl
from sklearn.preprocessing import normalize ### install from http://scikit-learn.org/stable/
def create_matrix(size_row, size_col):
return csr_matrix((size_row, size_col))
def create_dense_matrix(size_row, size_col):
return mat(zeros((size_row, size_col)))
def set_Y(train_mid, seedMention_tid_score, mid_mention, size_row, size_col):
row = []
col = []
val = []
num_NIL = 0
num_target = 0
NIL_set = set()
for mid in train_mid:
# in training set
mention = mid_mention[mid]
if mention in seedMention_tid_score:
# in ground truth
tid = seedMention_tid_score[mention][0]
score = seedMention_tid_score[mention][1]
if tid == (size_col - 1):
# NIL
num_NIL += 1
# NIL_set.add((mid, tid, score))
NIL_set.add((mid, tid, 1.0))
else:
num_target += 1
row.append(mid)
col.append(tid)
# val.append(score)
val.append(1.0)
if num_target < 1:
print 'No target type entity seeded!!!!'
### random sample NIL examples
# neg_size = num_NIL
neg_size = min(num_NIL, 5*num_target)
# neg_size = int(min(num_NIL, num_target/(size_col-1.0)))
neg_example = rn.sample(NIL_set, neg_size)
for entry in neg_example:
row.append(entry[0])
col.append(entry[1])
val.append(entry[2])
Y = coo_matrix((val, (row, col)), shape = (size_row, size_col)).tocsr()
# print Y.nnz, '#ground truth mentions in Y'
print 'Percent Seeded Mention:', (Y.nnz+0.0)/len(mid_mention) * 100, '% of', len(mid_mention), \
', #target/All = ', num_target/(Y.nnz+0.0) * 100
return Y
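# Worked example (illustrative): with size_col = 3 (two target types plus NIL
# as the last column), a training mention mid=7 seeded as type 0 contributes
# Y[7, 0] = 1.0, sampled NIL seeds contribute Y[mid, 2] = 1.0, and mentions
# without a seed remain all-zero rows.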
def update_Y_closed_form(S_M, Y, Y0, Theta, PiC, gamma, mu):
# row = []
# col = []
# val = []
for j in range(PiC.shape[1]):
# for each candidate j, slicing to get submatrix
mid_list = PiC[:, j].nonzero()[0].tolist()
Y0_j = Y0[mid_list, :]
Theta_j = Theta[mid_list, :]
S_M_j = S_M[mid_list, :][:, mid_list]
if S_M_j.shape[0] * S_M_j.shape[1] < 2520800000:
# transform to dense matrix
tmp = ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j).todense()
Y_j = npl.inv(tmp) * (Theta_j + mu*Y0_j)
Y[mid_list, :] = Y_j
# # sparse
# Yc = spsl.inv((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j) * (Theta_j + mu*Y0_j)
# Yc = spsl.spsolve( ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j), (Theta_j + mu*Y0_j) )
# row_idx, col_idx = Yc.nonzero()
# for i in range(len(row_idx)):
# mid = mid_list[row_idx[i]]
# row.append(mid)
# col.append(col_idx[i])
# val.append(Yc[row_idx[i], col_idx[i]])
if j % 1000 == 0:
print 'candidate', j
# Y = coo_matrix((val, (row, col)), shape = Y0.shape).tocsr()
return Y
def inverse_matrix(X):
    # Take the element-wise reciprocal of the stored (nonzero) values in place.
    # Note this equals the true matrix inverse only for diagonal matrices.
    X.data[:] = 1/(X.data)
    return X
def clustype_appx(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, K):
PiLL = PiL.T*PiL
PiRR = PiR.T*PiR
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
C = create_dense_matrix(n, T)
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Y = Y0.copy()
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0)
C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) )
PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR))
PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL))
obj_old = obj
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
if (i+1) % 10 == 0:
print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
# Y = PiC*C
# Y = PiL*PL + PiR*PR
Y = PiC*C + PiL*PL + PiR*PR
return (Y, C, PL, PR)
def clustype_noClus_inner(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, tol, C, PL, PR, Y):
PiLL = PiL.T*PiL
PiRR = PiR.T*PiR
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0)
C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) )
PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR))
PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL))
obj_old = obj
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
rel = abs(obj_old - obj)/obj_old
if (i+1) % 10 == 0:
print '\tClusType_noClus_inner Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
if rel < tol:
print ' ClusType_noClus_inner Converges!'
Y = PiC*C + PiL*PL + PiR*PR
return (Y, C, PL, PR)
# Y = PiC*C
# Y = PiL*PL + PiR*PR
Y = PiC*C + PiL*PL + PiR*PR
print ' ClusType_noClus_inner Reach MaxIter!'
return (Y, C, PL, PR)
def clustype_noClus_PiLR(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
### pre-compuatation #############################################################
m = PiC.shape[0]
n, l = S_L.shape
PiLL = PiL.T*PiL # l-by-l
PiRR = PiR.T*PiR # l-by-l
### initialization #############################################################
C = create_dense_matrix(n, T)
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Y = Y0.copy()
theta = PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O*(norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + theta + mu*Y0)
C = 1/2.0 * ( S_L*PL + S_R*PR )
PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * lambda_O*PiL.T*(Y-PiR*PR)
PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * lambda_O*PiR.T*(Y-PiL*PL)
obj_old = obj
theta = PiL*PL + PiR*PR
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
if (i+1) % 10 == 0:
print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
Y = PiL*PL + PiR*PR
return Y
def clustype_noClus_PiC(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
C = create_dense_matrix(n, T)
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Y = Y0.copy()
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
### Start algorithm #############################################################
for i in range(ITER):
lambda4 = 1+gamma+mu
Y = 1/lambda4 * (gamma*S_M*Y + PiC*C + mu*Y0)
C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*Y )
PL = S_L.T*C
PR = S_R.T*C
obj_old = obj
obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))
if (i+1) % 10 == 0:
print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
Y = PiC*C
return Y
def clustype_onlycandidate(S_L, S_R, PiC, PiL, PiR, Y0, T, ITER):
### pre-compuatation #############################################################
u = 0.5 # u=0.5
### initialization #############################################################
m = PiC.shape[0]
n, l = S_L.shape
C0 = PiC.T * Y0
C = C0.copy()
PL = create_dense_matrix(l, T)
PR = create_dense_matrix(l, T)
Theta = PiC*C + PiL*PL + PiR*PR
obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)
### Start algorithm #############################################################
for i in range(ITER):
C = 1/(2+u) * (S_L*PL + S_R*PR + u*C0)
PL = S_L.T*C
PR = S_R.T*C
obj_old = obj
obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)
if (i+1) % 10 == 0:
print 'ClusType_Cand Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old
Y = PiC*C
return (Y, C, PL, PR)
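# Typical call sequence (sketch; the inputs are placeholders and follow the
# shapes implied above: S_M is m x m over mentions, PiC is m x n, PiL/PiR are
# m x l, S_L/S_R are n x l, Y0 is m x T with T types including NIL):
#
#   Y0 = set_Y(train_mid, seedMention_tid_score, mid_mention, m, T)
#   Y, C, PL, PR = clustype_appx(S_L, S_R, S_M, PiC, PiL, PiR, Y0,
#                                lambda_O, gamma, mu, T, ITER, K)
#   # each row of Y scores one mention against the T types; e.g. take the
#   # row-wise argmax to read off a predicted type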
| gpl-3.0 |
hakatashi/youtube-dl | youtube_dl/extractor/dw.py | 84 | 4098 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
from ..compat import compat_urlparse
class DWIE(InfoExtractor):
IE_NAME = 'dw'
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)'
_TESTS = [{
# video
'url': 'http://www.dw.com/en/intelligent-light/av-19112290',
'md5': '7372046e1815c5a534b43f3c3c36e6e9',
'info_dict': {
'id': '19112290',
'ext': 'mp4',
'title': 'Intelligent light',
'description': 'md5:90e00d5881719f2a6a5827cb74985af1',
'upload_date': '20160311',
}
}, {
# audio
'url': 'http://www.dw.com/en/worldlink-my-business/av-19111941',
'md5': '2814c9a1321c3a51f8a7aeb067a360dd',
'info_dict': {
'id': '19111941',
'ext': 'mp3',
'title': 'WorldLink: My business',
'description': 'md5:bc9ca6e4e063361e21c920c53af12405',
'upload_date': '20160311',
}
}, {
# DW documentaries, only last for one or two weeks
'url': 'http://www.dw.com/en/documentaries-welcome-to-the-90s-2016-05-21/e-19220158-9798',
'md5': '56b6214ef463bfb9a3b71aeb886f3cf1',
'info_dict': {
'id': '19274438',
'ext': 'mp4',
'title': 'Welcome to the 90s – Hip Hop',
'description': 'Welcome to the 90s - The Golden Decade of Hip Hop',
'upload_date': '20160521',
},
'skip': 'Video removed',
}]
def _real_extract(self, url):
media_id = self._match_id(url)
webpage = self._download_webpage(url, media_id)
hidden_inputs = self._hidden_inputs(webpage)
title = hidden_inputs['media_title']
media_id = hidden_inputs.get('media_id') or media_id
if hidden_inputs.get('player_type') == 'video' and hidden_inputs.get('stream_file') == '1':
formats = self._extract_smil_formats(
'http://www.dw.com/smil/v-%s' % media_id, media_id,
transform_source=lambda s: s.replace(
'rtmp://tv-od.dw.de/flash/',
'http://tv-download.dw.de/dwtv_video/flv/'))
self._sort_formats(formats)
else:
formats = [{'url': hidden_inputs['file_name']}]
upload_date = hidden_inputs.get('display_date')
if not upload_date:
upload_date = self._html_search_regex(
r'<span[^>]+class="date">([0-9.]+)\s*\|', webpage,
'upload date', default=None)
upload_date = unified_strdate(upload_date)
return {
'id': media_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': hidden_inputs.get('preview_image'),
'duration': int_or_none(hidden_inputs.get('file_duration')),
'upload_date': upload_date,
'formats': formats,
}
class DWArticleIE(InfoExtractor):
IE_NAME = 'dw:article'
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
_TEST = {
'url': 'http://www.dw.com/en/no-hope-limited-options-for-refugees-in-idomeni/a-19111009',
'md5': '8ca657f9d068bbef74d6fc38b97fc869',
'info_dict': {
'id': '19105868',
'ext': 'mp4',
'title': 'The harsh life of refugees in Idomeni',
'description': 'md5:196015cc7e48ebf474db9399420043c7',
'upload_date': '20160310',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
hidden_inputs = self._hidden_inputs(webpage)
media_id = hidden_inputs['media_id']
media_path = self._search_regex(r'href="([^"]+av-%s)"\s+class="overlayLink"' % media_id, webpage, 'media url')
media_url = compat_urlparse.urljoin(url, media_path)
return self.url_result(media_url, 'DW', media_id)
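# Hedged usage note (not from the original file): extractors like DWIE and
# DWArticleIE are normally exercised through the public YoutubeDL API rather
# than instantiated directly, e.g.:
#
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.dw.com/en/intelligent-light/av-19112290',
#           download=False)
#       print(info['title'])
#
# The URL is taken from the _TESTS entry above; the media itself may no longer
# be available.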
| unlicense |
DataReplyUK/datareplyuk | GenesAssociation/spark-2.0.0-bin-hadoop2.7/python/pyspark/ml/param/shared.py | 4 | 21476 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.
from pyspark.ml.param import *
class HasMaxIter(Params):
"""
Mixin for param maxIter: max number of iterations (>= 0).
"""
maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasMaxIter, self).__init__()
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
def getMaxIter(self):
"""
Gets the value of maxIter or its default value.
"""
return self.getOrDefault(self.maxIter)
class HasRegParam(Params):
"""
Mixin for param regParam: regularization parameter (>= 0).
"""
regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0).", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasRegParam, self).__init__()
def setRegParam(self, value):
"""
Sets the value of :py:attr:`regParam`.
"""
return self._set(regParam=value)
def getRegParam(self):
"""
Gets the value of regParam or its default value.
"""
return self.getOrDefault(self.regParam)
class HasFeaturesCol(Params):
"""
Mixin for param featuresCol: features column name.
"""
featuresCol = Param(Params._dummy(), "featuresCol", "features column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasFeaturesCol, self).__init__()
self._setDefault(featuresCol='features')
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
def getFeaturesCol(self):
"""
Gets the value of featuresCol or its default value.
"""
return self.getOrDefault(self.featuresCol)
class HasLabelCol(Params):
"""
Mixin for param labelCol: label column name.
"""
labelCol = Param(Params._dummy(), "labelCol", "label column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasLabelCol, self).__init__()
self._setDefault(labelCol='label')
def setLabelCol(self, value):
"""
Sets the value of :py:attr:`labelCol`.
"""
return self._set(labelCol=value)
def getLabelCol(self):
"""
Gets the value of labelCol or its default value.
"""
return self.getOrDefault(self.labelCol)
class HasPredictionCol(Params):
"""
Mixin for param predictionCol: prediction column name.
"""
predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasPredictionCol, self).__init__()
self._setDefault(predictionCol='prediction')
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
def getPredictionCol(self):
"""
Gets the value of predictionCol or its default value.
"""
return self.getOrDefault(self.predictionCol)
class HasProbabilityCol(Params):
"""
Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
"""
probabilityCol = Param(Params._dummy(), "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasProbabilityCol, self).__init__()
self._setDefault(probabilityCol='probability')
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
def getProbabilityCol(self):
"""
Gets the value of probabilityCol or its default value.
"""
return self.getOrDefault(self.probabilityCol)
class HasRawPredictionCol(Params):
"""
Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
"""
rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasRawPredictionCol, self).__init__()
self._setDefault(rawPredictionCol='rawPrediction')
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
def getRawPredictionCol(self):
"""
Gets the value of rawPredictionCol or its default value.
"""
return self.getOrDefault(self.rawPredictionCol)
class HasInputCol(Params):
"""
Mixin for param inputCol: input column name.
"""
inputCol = Param(Params._dummy(), "inputCol", "input column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasInputCol, self).__init__()
def setInputCol(self, value):
"""
Sets the value of :py:attr:`inputCol`.
"""
return self._set(inputCol=value)
def getInputCol(self):
"""
Gets the value of inputCol or its default value.
"""
return self.getOrDefault(self.inputCol)
class HasInputCols(Params):
"""
Mixin for param inputCols: input column names.
"""
inputCols = Param(Params._dummy(), "inputCols", "input column names.", typeConverter=TypeConverters.toListString)
def __init__(self):
super(HasInputCols, self).__init__()
def setInputCols(self, value):
"""
Sets the value of :py:attr:`inputCols`.
"""
return self._set(inputCols=value)
def getInputCols(self):
"""
Gets the value of inputCols or its default value.
"""
return self.getOrDefault(self.inputCols)
class HasOutputCol(Params):
"""
Mixin for param outputCol: output column name.
"""
outputCol = Param(Params._dummy(), "outputCol", "output column name.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasOutputCol, self).__init__()
self._setDefault(outputCol=self.uid + '__output')
def setOutputCol(self, value):
"""
Sets the value of :py:attr:`outputCol`.
"""
return self._set(outputCol=value)
def getOutputCol(self):
"""
Gets the value of outputCol or its default value.
"""
return self.getOrDefault(self.outputCol)
class HasNumFeatures(Params):
"""
Mixin for param numFeatures: number of features.
"""
numFeatures = Param(Params._dummy(), "numFeatures", "number of features.", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasNumFeatures, self).__init__()
def setNumFeatures(self, value):
"""
Sets the value of :py:attr:`numFeatures`.
"""
return self._set(numFeatures=value)
def getNumFeatures(self):
"""
Gets the value of numFeatures or its default value.
"""
return self.getOrDefault(self.numFeatures)
class HasCheckpointInterval(Params):
"""
Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations.
"""
checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations.", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasCheckpointInterval, self).__init__()
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
def getCheckpointInterval(self):
"""
Gets the value of checkpointInterval or its default value.
"""
return self.getOrDefault(self.checkpointInterval)
class HasSeed(Params):
"""
Mixin for param seed: random seed.
"""
seed = Param(Params._dummy(), "seed", "random seed.", typeConverter=TypeConverters.toInt)
def __init__(self):
super(HasSeed, self).__init__()
self._setDefault(seed=hash(type(self).__name__))
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def getSeed(self):
"""
Gets the value of seed or its default value.
"""
return self.getOrDefault(self.seed)
class HasTol(Params):
"""
Mixin for param tol: the convergence tolerance for iterative algorithms (>= 0).
"""
tol = Param(Params._dummy(), "tol", "the convergence tolerance for iterative algorithms (>= 0).", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasTol, self).__init__()
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
def getTol(self):
"""
Gets the value of tol or its default value.
"""
return self.getOrDefault(self.tol)
class HasStepSize(Params):
"""
Mixin for param stepSize: Step size to be used for each iteration of optimization (>= 0).
"""
stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization (>= 0).", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasStepSize, self).__init__()
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
def getStepSize(self):
"""
Gets the value of stepSize or its default value.
"""
return self.getOrDefault(self.stepSize)
class HasHandleInvalid(Params):
"""
Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
"""
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasHandleInvalid, self).__init__()
def setHandleInvalid(self, value):
"""
Sets the value of :py:attr:`handleInvalid`.
"""
return self._set(handleInvalid=value)
def getHandleInvalid(self):
"""
Gets the value of handleInvalid or its default value.
"""
return self.getOrDefault(self.handleInvalid)
class HasElasticNetParam(Params):
"""
Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
"""
elasticNetParam = Param(Params._dummy(), "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", typeConverter=TypeConverters.toFloat)
def __init__(self):
super(HasElasticNetParam, self).__init__()
self._setDefault(elasticNetParam=0.0)
def setElasticNetParam(self, value):
"""
Sets the value of :py:attr:`elasticNetParam`.
"""
return self._set(elasticNetParam=value)
def getElasticNetParam(self):
"""
Gets the value of elasticNetParam or its default value.
"""
return self.getOrDefault(self.elasticNetParam)
class HasFitIntercept(Params):
"""
Mixin for param fitIntercept: whether to fit an intercept term.
"""
fitIntercept = Param(Params._dummy(), "fitIntercept", "whether to fit an intercept term.", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasFitIntercept, self).__init__()
self._setDefault(fitIntercept=True)
def setFitIntercept(self, value):
"""
Sets the value of :py:attr:`fitIntercept`.
"""
return self._set(fitIntercept=value)
def getFitIntercept(self):
"""
Gets the value of fitIntercept or its default value.
"""
return self.getOrDefault(self.fitIntercept)
class HasStandardization(Params):
"""
Mixin for param standardization: whether to standardize the training features before fitting the model.
"""
standardization = Param(Params._dummy(), "standardization", "whether to standardize the training features before fitting the model.", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(HasStandardization, self).__init__()
self._setDefault(standardization=True)
def setStandardization(self, value):
"""
Sets the value of :py:attr:`standardization`.
"""
return self._set(standardization=value)
def getStandardization(self):
"""
Gets the value of standardization or its default value.
"""
return self.getOrDefault(self.standardization)
class HasThresholds(Params):
"""
Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values >= 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class' threshold.
"""
thresholds = Param(Params._dummy(), "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values >= 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class' threshold.", typeConverter=TypeConverters.toListFloat)
def __init__(self):
super(HasThresholds, self).__init__()
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
def getThresholds(self):
"""
Gets the value of thresholds or its default value.
"""
return self.getOrDefault(self.thresholds)
class HasWeightCol(Params):
"""
Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0.
"""
weightCol = Param(Params._dummy(), "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasWeightCol, self).__init__()
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def getWeightCol(self):
"""
Gets the value of weightCol or its default value.
"""
return self.getOrDefault(self.weightCol)
class HasSolver(Params):
"""
Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
"""
solver = Param(Params._dummy(), "solver", "the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasSolver, self).__init__()
self._setDefault(solver='auto')
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
def getSolver(self):
"""
Gets the value of solver or its default value.
"""
return self.getOrDefault(self.solver)
class HasVarianceCol(Params):
"""
Mixin for param varianceCol: column name for the biased sample variance of prediction.
"""
varianceCol = Param(Params._dummy(), "varianceCol", "column name for the biased sample variance of prediction.", typeConverter=TypeConverters.toString)
def __init__(self):
super(HasVarianceCol, self).__init__()
def setVarianceCol(self, value):
"""
Sets the value of :py:attr:`varianceCol`.
"""
return self._set(varianceCol=value)
def getVarianceCol(self):
"""
Gets the value of varianceCol or its default value.
"""
return self.getOrDefault(self.varianceCol)
class DecisionTreeParams(Params):
"""
Mixin for Decision Tree parameters.
"""
maxDepth = Param(Params._dummy(), "maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.", typeConverter=TypeConverters.toInt)
maxBins = Param(Params._dummy(), "maxBins", "Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.", typeConverter=TypeConverters.toInt)
minInstancesPerNode = Param(Params._dummy(), "minInstancesPerNode", "Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.", typeConverter=TypeConverters.toInt)
minInfoGain = Param(Params._dummy(), "minInfoGain", "Minimum information gain for a split to be considered at a tree node.", typeConverter=TypeConverters.toFloat)
maxMemoryInMB = Param(Params._dummy(), "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation. If too small, then 1 node will be split per iteration, and its aggregates may exceed this size.", typeConverter=TypeConverters.toInt)
cacheNodeIds = Param(Params._dummy(), "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees. Users can set how often should the cache be checkpointed or disable it by setting checkpointInterval.", typeConverter=TypeConverters.toBoolean)
def __init__(self):
super(DecisionTreeParams, self).__init__()
def setMaxDepth(self, value):
"""
Sets the value of :py:attr:`maxDepth`.
"""
return self._set(maxDepth=value)
def getMaxDepth(self):
"""
Gets the value of maxDepth or its default value.
"""
return self.getOrDefault(self.maxDepth)
def setMaxBins(self, value):
"""
Sets the value of :py:attr:`maxBins`.
"""
return self._set(maxBins=value)
def getMaxBins(self):
"""
Gets the value of maxBins or its default value.
"""
return self.getOrDefault(self.maxBins)
def setMinInstancesPerNode(self, value):
"""
Sets the value of :py:attr:`minInstancesPerNode`.
"""
return self._set(minInstancesPerNode=value)
def getMinInstancesPerNode(self):
"""
Gets the value of minInstancesPerNode or its default value.
"""
return self.getOrDefault(self.minInstancesPerNode)
def setMinInfoGain(self, value):
"""
Sets the value of :py:attr:`minInfoGain`.
"""
return self._set(minInfoGain=value)
def getMinInfoGain(self):
"""
Gets the value of minInfoGain or its default value.
"""
return self.getOrDefault(self.minInfoGain)
def setMaxMemoryInMB(self, value):
"""
Sets the value of :py:attr:`maxMemoryInMB`.
"""
return self._set(maxMemoryInMB=value)
def getMaxMemoryInMB(self):
"""
Gets the value of maxMemoryInMB or its default value.
"""
return self.getOrDefault(self.maxMemoryInMB)
def setCacheNodeIds(self, value):
"""
Sets the value of :py:attr:`cacheNodeIds`.
"""
return self._set(cacheNodeIds=value)
def getCacheNodeIds(self):
"""
Gets the value of cacheNodeIds or its default value.
"""
return self.getOrDefault(self.cacheNodeIds)
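# Hedged usage note (not part of this generated file): the mixins above are
# inherited by concrete estimators, so the generated getters/setters surface
# as the usual Param API. Sketch, assuming an active SparkSession:
#
#   from pyspark.ml.classification import LogisticRegression
#   lr = LogisticRegression()
#   lr.setMaxIter(10).setRegParam(0.01)
#   assert lr.getMaxIter() == 10
#   assert lr.getRegParam() == 0.01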
| apache-2.0 |
hsaputra/tensorflow | tensorflow/python/saved_model/saved_model.py | 18 | 1768 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convenience functions to save a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
# pylint: enable=unused-import
# pylint: disable=wildcard-import
from tensorflow.python.saved_model.simple_save import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
"builder",
"constants",
"loader",
"main_op",
"signature_constants",
"signature_def_utils",
"simple_save",
"tag_constants",
"utils",
]
remove_undocumented(__name__, _allowed_symbols)
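# Hedged usage note (not part of the original module): the re-exported symbols
# are normally reached through the tf.saved_model namespace; a minimal TF 1.x
# sketch of the simple_save wrapper (the export path is illustrative):
#
#   import tensorflow as tf
#   with tf.Session(graph=tf.Graph()) as sess:
#       x = tf.placeholder(tf.float32, [None, 1], name='x')
#       y = tf.identity(x, name='y')
#       tf.saved_model.simple_save(sess, '/tmp/example_saved_model',
#                                  inputs={'x': x}, outputs={'y': y})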
| apache-2.0 |
cacahootie/geophoto | geophoto/oembed.py | 1 | 1254 |
from json import loads
from urlparse import urlparse
from flask import current_app as app
from flask import request
def thumbUrl(thumb):
return "https://venturelog.imgix.net/articles/" + thumb + \
"?w=200&h=150&fit=crop&crop=entropy&auto=compress,format"
def html(article):
return '<div><p>' + article['headline'] + '</p><p>' + article['leader'] + '</p></div>'
def oembed(url):
path = urlparse(url).path + "?format=json"
with app.test_client() as c:
rv = c.get(path)
try:
article = loads(rv.data)
except ValueError:
print url
print path
raise
if request.args.get("format") == "html":
return html(article)
return {
"success": True,
"type": "rich",
"version": "1.0",
"provider_name": "Venturelog",
"provider_url": "http://www.venturelog.io",
"title": article['headline'],
"author_name": "Brad Smith",
"author_url": "http://www.venturelog.io",
"height": "300",
"width": "800",
"thumbnail_width": "200",
"thumbnail_height": str(int(200 * float(3)/4)),
"thumbnail_url": thumbUrl(article['thumbnail']),
"html": html(article)
}
| mit |
ArmyOfPirates/openwrt | scripts/cfe-partition-tag.py | 27 | 5514 | #!/usr/bin/env python3
"""
CFE Partition Tag
{
u32 part_id;
u32 part_size;
u16 flags;
char part_name[33];
char part_version[21];
u32 part_crc32;
}
"""
import argparse
import os
import struct
PART_NAME_SIZE = 33
PART_VERSION_SIZE = 21
CRC32_INIT = 0xFFFFFFFF
CRC32_TABLE = [
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
]
def auto_int(x):
return int(x, 0)
def crc32(bytes, size, crc):
i = 0
while (i < size):
crc = (crc >> 8) ^ CRC32_TABLE[(crc ^ bytes[i]) & 0xff]
i += 1
return crc
def str_to_bytes_pad(string, size):
str_bytes = string.encode()
num_bytes = len(str_bytes)
if (num_bytes >= size):
str_bytes = str_bytes[:size - 1] + '\0'.encode()
else:
str_bytes += '\0'.encode() * (size - num_bytes)
return str_bytes
def create_tag(args, in_bytes, size):
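    # The packed tag is 4 + 4 + 2 + 33 + 21 + 4 = 68 bytes: big-endian part_id,
    # part_size and flags, the NUL-padded name and version strings, and finally
    # the CRC32 of the partition contents.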
crc = crc32(in_bytes, size, CRC32_INIT)
tag = bytearray()
tag += struct.pack('>I', args.part_id)
tag += struct.pack('>I', size)
tag += struct.pack('>H', args.part_flags)
tag += str_to_bytes_pad(args.part_name, PART_NAME_SIZE)
tag += str_to_bytes_pad(args.part_version, PART_VERSION_SIZE)
tag += struct.pack('>I', crc)
return tag
def create_output(args):
in_st = os.stat(args.input_file)
in_size = in_st.st_size
in_f = open(args.input_file, 'r+b')
in_bytes = in_f.read(in_size)
in_f.close()
tag = create_tag(args, in_bytes, in_size)
out_f = open(args.output_file, 'w+b')
out_f.write(tag)
out_f.close()
def main():
global args
parser = argparse.ArgumentParser(description='')
parser.add_argument('--flags',
dest='part_flags',
action='store',
type=auto_int,
help='Partition Flags')
parser.add_argument('--id',
dest='part_id',
action='store',
type=auto_int,
help='Partition ID')
parser.add_argument('--input-file',
dest='input_file',
action='store',
type=str,
help='Input file')
parser.add_argument('--output-file',
dest='output_file',
action='store',
type=str,
help='Output file')
parser.add_argument('--name',
dest='part_name',
action='store',
type=str,
help='Partition Name')
parser.add_argument('--version',
dest='part_version',
action='store',
type=str,
help='Partition Version')
args = parser.parse_args()
if ((not args.part_flags) or
(not args.part_id) or
(not args.input_file) or
(not args.output_file) or
(not args.part_name) or
(not args.part_version)):
parser.print_help()
else:
create_output(args)
main()
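# Hedged invocation sketch (all values are illustrative, not from the original
# script); every option is required:
#
#   ./cfe-partition-tag.py --flags 0x1 --id 0x1 \
#       --input-file firmware.bin --output-file tag.bin \
#       --name 'RootFS' --version '1.0'
#
# This writes a 68-byte CFE partition tag describing firmware.bin to tag.bin.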
| gpl-2.0 |
cms-externals/openloops | scons-local/scons-local-2.3.0/SCons/Tool/gettext.py | 11 | 2093 | """gettext tool
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/gettext.py 2013/03/03 09:48:35 garyo"
#############################################################################
def generate(env,**kw):
import SCons.Tool
from SCons.Tool.GettextCommon \
import _translate, tool_list
for t in tool_list(env['PLATFORM'], env):
env.Tool(t)
env.AddMethod(_translate, 'Translate')
#############################################################################
#############################################################################
def exists(env):
from SCons.Tool.GettextCommon \
import _xgettext_exists, _msginit_exists, \
_msgmerge_exists, _msgfmt_exists
try:
return _xgettext_exists(env) and _msginit_exists(env) \
and _msgmerge_exists(env) and _msgfmt_exists(env)
except:
return False
#############################################################################
| gpl-3.0 |
evancich/apm_min_arm_tone | Tools/scripts/update_wiki.py | 275 | 2870 | #!/usr/bin/env python
'''
update a wordpress wiki page
Andrew Tridgell
May 2013
See http://codex.wordpress.org/XML-RPC_WordPress_API/Posts
'''
import xmlrpclib, sys
from optparse import OptionParser
parser = OptionParser("update_wiki.py [options] <file>")
parser.add_option("--username", help="Wordpress username", default=None)
parser.add_option("--password", help="Wordpress password", default=None)
parser.add_option("--url", help="Wordpress URL", default=None)
parser.add_option("--post-title", help="title of page", default=None)
parser.add_option("--post-id", help="ID of page", default=None)
parser.add_option("--blog-id", help="ID of wiki", default='')
parser.add_option("--list", action='store_true', help="list titles", default=False)
(opts, args) = parser.parse_args()
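# Hedged invocation sketch (hostname, credentials and file names are
# placeholders, not from the original script):
#
#   ./update_wiki.py --url http://wiki.example.com --username bot \
#       --password secret --post-title 'My Page' content.html
#
# or pass --list with the same credentials to print the available post titles.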
if opts.url is None:
print("You must supply a base URL for the wordpress site")
sys.exit(1)
if opts.username is None or opts.password is None:
print("You must supply a username and password")
sys.exit(1)
if len(args) == 0 and not opts.list:
print("You must supply a filename containing the content to load")
sys.exit(1)
server = xmlrpclib.ServerProxy(opts.url + '/xmlrpc.php')
def list_posts():
'''list posts'''
posts = server.wp.getPosts(opts.blog_id, opts.username, opts.password,
{ 'post_type' : 'wiki', 'number' : 1000000 },
[ 'post_title', 'post_id' ])
for p in posts:
try:
print('post_id=%s post_title="%s"' % (p['post_id'], p['post_title']))
except Exception:
pass
def find_post_by_title(title):
'''find a post given the title'''
posts = server.wp.getPosts(opts.blog_id, opts.username, opts.password,
{ 'post_type' : 'wiki', 'number' : 1000000 },
[ 'post_title', 'post_id' ])
for p in posts:
if p['post_title'] == title:
return p['post_id']
return None
if opts.list:
list_posts()
sys.exit(0)
if opts.post_title is None and opts.post_id is None:
print("You must supply a post_title or post_id")
sys.exit(1)
post_id = opts.post_id
if post_id is None:
post_id = find_post_by_title(opts.post_title)
if post_id is None:
print('Failed to find post_title "%s"' % opts.post_title)
sys.exit(1)
file = open(args[0])
content = file.read()
file.close()
print("Fetching existing content")
post = server.wp.getPost(opts.blog_id, opts.username, opts.password, post_id)
if str(post['post_content']).strip() == str(content).strip():
print("Content unchanged")
sys.exit(0)
print("Uploading new post_content")
r = server.wp.editPost(opts.blog_id, opts.username, opts.password, post_id, { 'post_content' : content })
if r == True:
print("Upload OK")
sys.exit(0)
print 'result: ', r
sys.exit(1)
| gpl-3.0 |
psaylor/ORCA | server/node_modules/binaryjs/node_modules/binarypack/node_modules/buffercursor/node_modules/verror/node_modules/extsprintf/deps/javascriptlint/javascriptlint/jsparse.py | 21 | 15686 | #!/usr/bin/env python2.6
# vim: ts=4 sw=4 expandtab
""" Parses a script into nodes. """
import bisect
import re
import unittest
import spidermonkey
from spidermonkey import tok, op
from util import JSVersion
_tok_names = dict(zip(
[getattr(tok, prop) for prop in dir(tok)],
['tok.%s' % prop for prop in dir(tok)]
))
_op_names = dict(zip(
[getattr(op, prop) for prop in dir(op)],
['op.%s' % prop for prop in dir(op)]
))
NodePos = spidermonkey.NodePos
class NodePositions:
" Given a string, allows [x] lookups for NodePos line and column numbers."
def __init__(self, text, start_pos=None):
# Find the length of each line and incrementally sum all of the lengths
# to determine the ending position of each line.
self._start_pos = start_pos
self._lines = text.splitlines(True)
lines = [0] + [len(x) for x in self._lines]
for x in range(1, len(lines)):
lines[x] += lines[x-1]
self._line_offsets = lines
def from_offset(self, offset):
line = bisect.bisect(self._line_offsets, offset)-1
col = offset - self._line_offsets[line]
if self._start_pos:
if line == 0:
col += self._start_pos.col
line += self._start_pos.line
return NodePos(line, col)
def to_offset(self, pos):
pos = self._to_rel_pos(pos)
offset = self._line_offsets[pos.line] + pos.col
assert offset <= self._line_offsets[pos.line+1] # out-of-bounds col num
return offset
def text(self, start, end):
assert start <= end
start, end = self._to_rel_pos(start), self._to_rel_pos(end)
# Trim the ending first in case it's a single line.
lines = self._lines[start.line:end.line+1]
lines[-1] = lines[-1][:end.col+1]
lines[0] = lines[0][start.col:]
return ''.join(lines)
def _to_rel_pos(self, pos):
" converts a position to a position relative to self._start_pos "
if not self._start_pos:
return pos
line, col = pos.line, pos.col
line -= self._start_pos.line
if line == 0:
col -= self._start_pos.col
assert line >= 0 and col >= 0 # out-of-bounds node position
return NodePos(line, col)
class NodeRanges:
def __init__(self):
self._offsets = []
def add(self, start, end):
i = bisect.bisect_left(self._offsets, start)
if i % 2 == 1:
i -= 1
start = self._offsets[i]
end = end + 1
j = bisect.bisect_left(self._offsets, end)
if j % 2 == 1:
end = self._offsets[j]
j += 1
self._offsets[i:j] = [start,end]
def has(self, pos):
return bisect.bisect_right(self._offsets, pos) % 2 == 1
class _Node:
def add_child(self, node):
if node:
node.node_index = len(self.kids)
node.parent = self
self.kids.append(node)
def start_pos(self):
try:
return self._start_pos
except AttributeError:
self._start_pos = NodePos(self._start_line, self._start_col)
return self._start_pos
def end_pos(self):
try:
return self._end_pos
except AttributeError:
self._end_pos = NodePos(self._end_line, self._end_col)
return self._end_pos
def __str__(self):
kind = self.kind
if not kind:
kind = '(none)'
return '%s>%s' % (_tok_names[kind], str(self.kids))
def is_equivalent(self, other, are_functions_equiv=False):
if not other:
return False
# Bail out for functions
if not are_functions_equiv:
if self.kind == tok.FUNCTION:
return False
if self.kind == tok.LP and self.opcode == op.CALL:
return False
if self.kind != other.kind:
return False
if self.opcode != other.opcode:
return False
# Check atoms on names, properties, and string constants
if self.kind in (tok.NAME, tok.DOT, tok.STRING) and self.atom != other.atom:
return False
# Check values on numbers
if self.kind == tok.NUMBER and self.dval != other.dval:
return False
# Compare child nodes
if len(self.kids) != len(other.kids):
return False
for i in range(0, len(self.kids)):
# Watch for dead nodes
if not self.kids[i]:
if not other.kids[i]: return True
else: return False
if not self.kids[i].is_equivalent(other.kids[i]):
return False
return True
def isvalidversion(jsversion):
if jsversion is None:
return True
return spidermonkey.is_valid_version(jsversion.version)
def findpossiblecomments(script, node_positions):
pos = 0
single_line_re = r"//[^\r\n]*"
multi_line_re = r"/\*(.*?)\*/"
full_re = "(%s)|(%s)" % (single_line_re, multi_line_re)
comment_re = re.compile(full_re, re.DOTALL)
comments = []
while True:
match = comment_re.search(script, pos)
if not match:
return comments
# Get the comment text
comment_text = script[match.start():match.end()]
if comment_text.startswith('/*'):
comment_text = comment_text[2:-2]
opcode = 'JSOP_C_COMMENT'
else:
comment_text = comment_text[2:]
opcode = 'JSOP_CPP_COMMENT'
opcode = opcode[5:].lower()
start_offset = match.start()
end_offset = match.end()-1
start_pos = node_positions.from_offset(start_offset)
end_pos = node_positions.from_offset(end_offset)
kwargs = {
'kind': 'COMMENT',
'atom': comment_text,
'opcode': opcode,
'_start_line': start_pos.line,
'_start_col': start_pos.col,
'_end_line': end_pos.line,
'_end_col': end_pos.col,
'parent': None,
'kids': [],
'node_index': None
}
comment_node = _Node()
comment_node.__dict__.update(kwargs)
comments.append(comment_node)
# Start searching immediately after the start of the comment in case
# this one was within a string or a regexp.
pos = match.start()+1
def parse(script, jsversion, error_callback, startpos=None):
""" All node positions will be relative to startpos. This allows scripts
to be embedded in a file (for example, HTML).
"""
def _wrapped_callback(line, col, msg):
assert msg.startswith('JSMSG_')
msg = msg[6:].lower()
error_callback(line, col, msg)
startpos = startpos or NodePos(0,0)
jsversion = jsversion or JSVersion.default()
assert isvalidversion(jsversion)
return spidermonkey.parse(script, jsversion.version, jsversion.e4x,
_Node, _wrapped_callback,
startpos.line, startpos.col)
def filtercomments(possible_comments, node_positions, root_node):
comment_ignore_ranges = NodeRanges()
def process(node):
if node.kind == tok.NUMBER:
node.atom = node_positions.text(node.start_pos(), node.end_pos())
elif node.kind == tok.STRING or \
(node.kind == tok.OBJECT and node.opcode == op.REGEXP):
start_offset = node_positions.to_offset(node.start_pos())
end_offset = node_positions.to_offset(node.end_pos()) - 1
comment_ignore_ranges.add(start_offset, end_offset)
for kid in node.kids:
if kid:
process(kid)
process(root_node)
comments = []
for comment in possible_comments:
start_offset = node_positions.to_offset(comment.start_pos())
end_offset = node_positions.to_offset(comment.end_pos())
if comment_ignore_ranges.has(start_offset):
continue
comment_ignore_ranges.add(start_offset, end_offset)
comments.append(comment)
return comments
def findcomments(script, root_node, start_pos=None):
node_positions = NodePositions(script, start_pos)
possible_comments = findpossiblecomments(script, node_positions)
return filtercomments(possible_comments, node_positions, root_node)
def is_compilable_unit(script, jsversion):
jsversion = jsversion or JSVersion.default()
assert isvalidversion(jsversion)
return spidermonkey.is_compilable_unit(script, jsversion.version, jsversion.e4x)
def _dump_node(node, depth=0):
if node is None:
print ' '*depth,
print '(None)'
print
else:
print ' '*depth,
print '%s, %s' % (_tok_names[node.kind], _op_names[node.opcode])
print ' '*depth,
print '%s - %s' % (node.start_pos(), node.end_pos())
if hasattr(node, 'atom'):
print ' '*depth,
print 'atom: %s' % node.atom
if node.no_semi:
print ' '*depth,
print '(no semicolon)'
print
for node in node.kids:
_dump_node(node, depth+1)
def dump_tree(script):
def error_callback(line, col, msg):
        print '(%i, %i): %s' % (line, col, msg)
node = parse(script, None, error_callback)
_dump_node(node)
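# Hedged example (assumes the bundled spidermonkey extension is importable):
#
#   dump_tree('var x = 1;')
#
# prints the node tree; as the unit tests below show, the root for this input
# is a tok.LC node with a single tok.VAR child.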
class TestComments(unittest.TestCase):
def _test(self, script, expected_comments):
root = parse(script, None, lambda line, col, msg: None)
comments = findcomments(script, root)
encountered_comments = [node.atom for node in comments]
self.assertEquals(encountered_comments, list(expected_comments))
def testSimpleComments(self):
self._test('re = /\//g', ())
self._test('re = /\///g', ())
self._test('re = /\////g', ('g',))
def testCComments(self):
self._test('/*a*//*b*/', ('a', 'b'))
self._test('/*a\r\na*//*b\r\nb*/', ('a\r\na', 'b\r\nb'))
self._test('a//*b*/c', ('*b*/c',))
self._test('a///*b*/c', ('/*b*/c',))
self._test('a/*//*/;', ('//',))
self._test('a/*b*/+/*c*/d', ('b', 'c'))
class TestNodePositions(unittest.TestCase):
def _test(self, text, expected_lines, expected_cols):
# Get a NodePos list
positions = NodePositions(text)
positions = [positions.from_offset(i) for i in range(0, len(text))]
encountered_lines = ''.join([str(x.line) for x in positions])
encountered_cols = ''.join([str(x.col) for x in positions])
self.assertEquals(encountered_lines, expected_lines.replace(' ', ''))
self.assertEquals(encountered_cols, expected_cols.replace(' ', ''))
def testSimple(self):
self._test(
'abc\r\ndef\nghi\n\nj',
'0000 0 1111 2222 3 4',
'0123 4 0123 0123 0 0'
)
self._test(
'\rabc',
'0 111',
'0 012'
)
def testText(self):
pos = NodePositions('abc\r\ndef\n\nghi')
self.assertEquals(pos.text(NodePos(0, 0), NodePos(0, 0)), 'a')
self.assertEquals(pos.text(NodePos(0, 0), NodePos(0, 2)), 'abc')
self.assertEquals(pos.text(NodePos(0, 2), NodePos(1, 2)), 'c\r\ndef')
def testOffset(self):
pos = NodePositions('abc\r\ndef\n\nghi')
self.assertEquals(pos.to_offset(NodePos(0, 2)), 2)
self.assertEquals(pos.to_offset(NodePos(1, 0)), 5)
self.assertEquals(pos.to_offset(NodePos(3, 1)), 11)
def testStartPos(self):
pos = NodePositions('abc\r\ndef\n\nghi', NodePos(3,4))
self.assertEquals(pos.to_offset(NodePos(3, 4)), 0)
self.assertEquals(pos.to_offset(NodePos(3, 5)), 1)
self.assertEquals(pos.from_offset(0), NodePos(3, 4))
self.assertEquals(pos.text(NodePos(3, 4), NodePos(3, 4)), 'a')
self.assertEquals(pos.text(NodePos(3, 4), NodePos(3, 6)), 'abc')
self.assertEquals(pos.text(NodePos(3, 6), NodePos(4, 2)), 'c\r\ndef')
class TestNodeRanges(unittest.TestCase):
def testAdd(self):
r = NodeRanges()
r.add(5, 10)
self.assertEquals(r._offsets, [5,11])
r.add(15, 20)
self.assertEquals(r._offsets, [5,11,15,21])
r.add(21,22)
self.assertEquals(r._offsets, [5,11,15,23])
r.add(4,5)
self.assertEquals(r._offsets, [4,11,15,23])
r.add(9,11)
self.assertEquals(r._offsets, [4,12,15,23])
r.add(10,20)
self.assertEquals(r._offsets, [4,23])
r.add(4,22)
self.assertEquals(r._offsets, [4,23])
r.add(30,30)
self.assertEquals(r._offsets, [4,23,30,31])
def testHas(self):
r = NodeRanges()
r.add(5, 10)
r.add(15, 15)
assert not r.has(4)
assert r.has(5)
assert r.has(6)
assert r.has(9)
assert r.has(10)
assert not r.has(14)
assert r.has(15)
assert not r.has(16)
class TestCompilableUnit(unittest.TestCase):
def test(self):
tests = (
('var s = "', False),
('bogon()', True),
('int syntax_error;', True),
('a /* b', False),
('re = /.*', False),
('{ // missing curly', False)
)
for text, expected in tests:
encountered = is_compilable_unit(text, JSVersion.default())
self.assertEquals(encountered, expected)
# NOTE: This seems like a bug.
self.assert_(is_compilable_unit("/* test", JSVersion.default()))
class TestLineOffset(unittest.TestCase):
def testErrorPos(self):
def geterror(script, startpos):
errors = []
def onerror(line, col, msg):
errors.append((line, col, msg))
parse(script, None, onerror, startpos)
self.assertEquals(len(errors), 1)
return errors[0]
self.assertEquals(geterror(' ?', None), (0, 1, 'syntax_error'))
self.assertEquals(geterror('\n ?', None), (1, 1, 'syntax_error'))
self.assertEquals(geterror(' ?', NodePos(1,1)), (1, 2, 'syntax_error'))
self.assertEquals(geterror('\n ?', NodePos(1,1)), (2, 1, 'syntax_error'))
def testNodePos(self):
def getnodepos(script, startpos):
root = parse(script, None, None, startpos)
self.assertEquals(root.kind, tok.LC)
var, = root.kids
self.assertEquals(var.kind, tok.VAR)
return var.start_pos()
self.assertEquals(getnodepos('var x;', None), NodePos(0,0))
self.assertEquals(getnodepos(' var x;', None), NodePos(0,1))
self.assertEquals(getnodepos('\n\n var x;', None), NodePos(2,1))
self.assertEquals(getnodepos('var x;', NodePos(3,4)), NodePos(3,4))
self.assertEquals(getnodepos(' var x;', NodePos(3,4)), NodePos(3,5))
self.assertEquals(getnodepos('\n\n var x;', NodePos(3,4)), NodePos(5,1))
def testComments(self):
def testcomment(comment, startpos, expectedpos):
root = parse(comment, None, None, startpos)
comment, = findcomments(comment, root, startpos)
self.assertEquals(comment.start_pos(), expectedpos)
for comment in ('/*comment*/', '//comment'):
testcomment(comment, None, NodePos(0,0))
testcomment(' %s' % comment, None, NodePos(0,1))
testcomment('\n\n %s' % comment, None, NodePos(2,1))
testcomment('%s' % comment, NodePos(3,4), NodePos(3,4))
testcomment(' %s' % comment, NodePos(3,4), NodePos(3,5))
testcomment('\n\n %s' % comment, NodePos(3,4), NodePos(5,1))
if __name__ == '__main__':
unittest.main()
| mit |
qsnake/werkzeug | werkzeug/http.py | 25 | 19243 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import inspect
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
from urllib2 import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
try:
from hashlib import md5
except ImportError: # pragma: no cover
from md5 import new as md5
#: HTTP_STATUS_CODES is "exported" from this module.
#: XXX: move to werkzeug.consts or something
from werkzeug._internal import HTTP_STATUS_CODES
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_pop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
'upgrade'
])
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in options.iteritems():
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iterable.iteritems():
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type: text/html', {'mimetype': 'text/html'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = parts.next()[0]
extra = dict(parts)
return name, extra
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
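# Illustrative usage (not part of the original module):
#
#   accept = parse_accept_header('text/html,application/xhtml+xml;q=0.9')
#
# yields the (value, quality) pairs ('text/html', 1) and
# ('application/xhtml+xml', 0.9) wrapped in an Accept object from
# werkzeug.datastructures, which keeps them sorted by quality and exposes
# helpers such as .best.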
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`CacheControl` object is changed.
:param cls: the class for the returned object. By default
:class:`RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a :class:`HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`HeaderSet` object is changed.
:return: a :class:`HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`Authorization` object.
:param value: the authorization header to parse.
:return: a :class:`Authorization` object or `None`.
"""
if not value:
return
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == 'basic':
try:
username, password = auth_info.decode('base64').split(':', 1)
except Exception, e:
return
return Authorization('basic', {'username': username,
'password': password})
elif auth_type == 'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'nc', 'cnonce', \
'response':
if not key in auth_map:
return
return Authorization('digest', auth_map)
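# Illustrative usage (not part of the original module); 'dXNlcjpwYXNz' is the
# base64 encoding of 'user:pass':
#
#   auth = parse_authorization_header('Basic dXNlcjpwYXNz')
#   assert auth.username == 'user'
#   assert auth.password == 'pass'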
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a :class:`WWWAuthenticate`
object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a
value on the :class:`WWWAuthenticate` object is changed.
:return: a :class:`WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('w/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag[:2] in ('w/', 'W/'):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
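# Hedged usage sketch (illustration only, tag values invented): the returned
# ETags object distinguishes strong and weak tags.
#
#     >>> tags = parse_etags('"foo", w/"bar"')
#     >>> 'foo' in tags
#     True
#     >>> tags.contains_weak('bar')
#     True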
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
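# Hedged usage sketch (illustration only): the formats listed above collapse to
# the same naive UTC datetime, and unparseable input yields None.
#
#     >>> parse_date('Sun, 06 Nov 1994 08:49:37 GMT')
#     datetime.datetime(1994, 11, 6, 8, 49, 37)
#     >>> parse_date('not a date') is None
#     True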
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, basestring):
last_modified = parse_date(last_modified)
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
unmodified = if_none_match.contains_raw(etag)
return not unmodified
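# Hedged usage sketch (illustration only; a minimal fake environ, not a full
# WSGI environment): a conditional GET whose If-None-Match matches the current
# etag is reported as not modified.
#
#     >>> env = {'REQUEST_METHOD': 'GET', 'HTTP_IF_NONE_MATCH': '"abc"'}
#     >>> is_resource_modified(env, etag='abc')
#     False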
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
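# Hedged usage sketch (illustration only, header list invented): entity headers
# are stripped in place while allowed and non-entity headers survive.
#
#     >>> headers = [('Content-Type', 'text/html'), ('Expires', '0'), ('X-Foo', '1')]
#     >>> remove_entity_headers(headers)
#     >>> headers
#     [('Expires', '0'), ('X-Foo', '1')]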
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
"""
return header.lower() in _hop_by_pop_headers
# circular dependency fun
from werkzeug.datastructures import Headers, Accept, RequestCacheControl, \
ResponseCacheControl, HeaderSet, ETags, Authorization, \
WWWAuthenticate
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, LanguageAccept
| bsd-3-clause |
tectronics/dojango | dojango/data/__init__.py | 12 | 6298 | import re
__all__ = ('QueryInfo', 'QueryReadStoreInfo',
'JsonRestStoreInfo', 'JsonQueryRestStoreInfo',)
class QueryInfoFeatures(object):
sorting = True
paging = False
class QueryInfo(object):
'''Usage (is that the right solution?):
info = QueryInfo(request)
info.extract()
    queryset = info.process(Object.objects.all())
'''
start = 0
end = 25
filters = {}
sorting = [] # key=field // value=descending(True/False)
request = None
max_count = 25
def __init__(self, request, max_count=None, **kwargs):
self.request = request
if max_count is not None:
self.max_count = max_count
def extract(self):
self.set_paging()
self.set_sorting()
self.set_filters()
def set_paging(self):
"""Needs to be implemented in a subclass"""
pass
def set_sorting(self):
pass
def set_filters(self):
"""Needs to be implemented in a subclass"""
pass
def process(self, queryset):
# maybe using Django's paginator
return queryset.filter(**self.filters).order_by(*self.sorting)[self.start:self.end]
class QueryReadStoreInfo(QueryInfo):
"""
A helper to evaluate a request from a dojox.data.QueryReadStore
and extracting the following information from it:
- paging
- sorting
- filters
Parameters could be passed within GET or POST.
"""
def set_paging(self):
start = self.request[self.request.method].pop('start', 0)
# TODO: start = 1???
count = self.request[self.request.method].pop('count', 25)
        # the dojo combobox may return "Infinity", so guard the int conversion
        try:
            count = int(count)
        except (TypeError, ValueError):
            count = self.max_count
        if count > self.max_count:
            count = self.max_count
        self.start = int(start)
        self.end = self.start + count
def set_sorting(self):
# REQUEST['sort']:
# value: -sort_field (descending) / sort_field (ascending)
sort_attr = self.request[self.request.method].pop('sort', None)
if sort_attr:
self.sorting.append(sort_attr)
def set_filters(self):
        query_dict = {}
        for k, v in self.request[self.request.method].items():
            query_dict[k] = v
        self.filters = query_dict
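# Hedged usage sketch (illustration only; the model name and GET parameters are
# invented): a dojox.data.QueryReadStore request such as
# ?start=0&count=25&sort=-name&name=foo would be handled roughly like this in a
# view:
#
#     info = QueryReadStoreInfo(request, max_count=50)
#     info.extract()                              # fills start/end, sorting, filters
#     items = info.process(MyModel.objects.all())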
class JsonRestStoreInfo(QueryReadStoreInfo):
"""
A helper to evaluate a request from a dojox.data.JsonRestStoreInfo
and extracting the following information:
- paging
- filters
The paging parameter is passed within the request header "Range".
Filters are passed via GET (equal to QueryReadStoreInfo).
Sorting is just possible with JsonQueryReadStoreInfo.
"""
def set_paging(self):
# Receiving the following header:
# Range: items=0-24
# Returning: Content-Range: items 0-24/66
        # Django exposes the HTTP Range header as HTTP_RANGE in request.META
        if 'HTTP_RANGE' in self.request.META:
            regexp = re.compile(r"^\s*items=(\d+)-(\d+)", re.I)
            match = regexp.match(self.request.META['HTTP_RANGE'])
if match:
start, end = match.groups()
start, end = int(start), int(end)+1 # range-end means including that element!
self.start = start
count = self.max_count
if end-start < self.max_count:
count = end-start
self.end = start+count
def set_sorting(self):
# sorting is not available in the normal JsonRestStore
pass
class JsonQueryRestStoreInfo(QueryInfo):
jsonpath = None
jsonpath_filters = None
jsonpath_sorting = None
jsonpath_paging = None
def __init__(self, request, **kwargs):
"""
Matching the following example jsonpath:
/path/[?(@.field1='searchterm*'&@.field2='*search*')][/@['field1'],/@['field2']][0:24]
The last part of the URL will contain a JSONPath-query:
[filter][sort][start:end:step]
"""
path = request.path
if not path.endswith("/"):
path = path + "/"
        # assuming that at least one /path/ will be before the jsonpath query
# and that the character [ initiates and ] ends the jsonpath
# [ will be removed from the start and ] from the end
match = re.match(r'^/.*/(\[.*\])/$', path)
if match:
self.jsonpath = match.groups()[0]
if self.jsonpath:
# now we remove the starting [ and ending ] and also splitting it via ][
parts = self.jsonpath[1:-1].split("][")
for part in parts:
if part.startswith("?"):
self.jsonpath_filters = part
elif re.match(r'^[/\\].*$', part):
self.jsonpath_sorting = part
# [start:end:step]
elif re.match(r'^\d*:\d*:{0,1}\d*$', part):
self.jsonpath_paging = part
super(JsonQueryRestStoreInfo, self).__init__(request, **kwargs)
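    # Hedged illustration (invented URL, not from the original source): for a
    # request path like
    #     /path/[?(@.name='foo*')][/@['name']][0:24]/
    # the parsing above yields
    #     jsonpath_filters -> "?(@.name='foo*')"
    #     jsonpath_sorting -> "/@['name']"
    #     jsonpath_paging  -> "0:24"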
def set_paging(self):
# handling 0:24
match = re.match(r'^(\d*):(\d*):{0,1}\d*$', self.jsonpath_paging)
if match:
start, end = match.groups()
            if len(start) == 0:
                start = 0
            if len(end) == 0:
                end = int(start) + self.max_count
start, end = int(start), int(end)+1 # second argument means the element should be included!
self.start = start
count = self.max_count
if end-start < self.max_count:
count = end-start
self.end = start+count
def set_sorting(self):
# handling /@['field1'],/@['field2']
for f in self.jsonpath_sorting.split(",/"):
m = re.match(r"([\\/])@\['(.*)'\]", f)
if m:
sort_prefix = "-"
direction, field = m.groups()
if direction == "/":
                    sort_prefix = ""
self.sorting.append(sort_prefix + field)
def set_filters(self):
# handling ?(@.field1='searchterm*'&@.field2~'*search*')
pass | bsd-3-clause |
inuyasha2012/tornado-cat-example | example/main.py | 1 | 8865 | # coding=utf-8
from psycopg2.extras import NamedTupleCursor, Json
from tornado.web import Application, HTTPError
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from tornado.options import parse_command_line
import momoko
import os
from bank import SelectQuestion, get_level_one_item
from base import BaseHandler, SessionBaseHandler
from settings import MAX_ANSWER_COUNT, DSN, COOKIE_SECRET
from utils import Flow, get_quiz_stage, Que, session_reset, CheckChoice
class QuestionnaireListHandler(BaseHandler):
@gen.coroutine
def get(self):
        # list of questionnaires
cursor = yield self.db.execute("SELECT id, name FROM questionnaire;")
q_list = cursor.fetchall()
self.render('index.html', q_list=q_list)
class QuestionHandler(SessionBaseHandler):
@gen.coroutine
def _check_q_exist_n_get_q_a(self, q_id):
"""
        :param q_id: the questionnaire id
        :raise gen.Return: returns q_a, where q is the questionnaire and a is the answer
"""
session_key = self.session_key
cursor = yield self.db.execute(
"""
SELECT answer.id as aid, answer.score_answer, answer.old_answer,
answer.order_answer, answer.try_count,
answer.has_finished, questionnaire.id, questionnaire.type, questionnaire.second,
questionnaire.flow, questionnaire.level_one_count from answer
INNER JOIN questionnaire ON answer.questionnaire_id = questionnaire.id
WHERE answer.questionnaire_id=%s
AND answer.session_key=%s;
""", (q_id, session_key)
)
        # q_a means "questionnaire and answer"
q_a = cursor.fetchone()
if not q_a:
cursor = yield self.db.execute("SELECT id, type, flow, level_one_count, second "
"FROM questionnaire WHERE id=%s;",
(q_id,))
q = cursor.fetchone()
if q:
cursor = yield self.db.execute("INSERT INTO answer (questionnaire_id, session_key, "
"score_answer, order_answer, old_answer) VALUES (%s, %s, %s, %s, %s)"
"RETURNING id AS aid, score_answer, "
"order_answer, old_answer, try_count, "
"has_finished;",
(q_id, session_key, Json({}), Json({}), Json({})))
ans = cursor.fetchone()
raise gen.Return((q, ans))
else:
raise HTTPError(404)
else:
raise gen.Return((q_a, q_a))
@gen.coroutine
def get(self, q_id):
session = self.session
q_a = yield self._check_q_exist_n_get_q_a(q_id)
q, ans = q_a
        # session keys used below
is_re_start = 'is_%s_re_start' % q_id
step = '%s_step' % q_id
stage = '%s_stage' % q_id
next_item = '%s_next_item' % q_id
step_count = '%s_step_count' % q_id
        # the examinee's answering flow
flow = Flow(flow=q.flow, name=session.session_key)
        # if the session has no is_<id>_re_start flag, the examinee probably closed the browser, so restart the quiz
if not session.get(is_re_start, True):
            # check whether the first stage of the quiz has reached its end
if session[stage] == 1:
next_item_list = session[next_item]
que = Que(*next_item_list.pop(0))
else:
next_item = session[next_item]
que = Que(*next_item)
            # set the restart flag to True, so closing the browser or refreshing the page restarts the quiz
session[is_re_start] = True
session[step] += 1
session[stage] = get_quiz_stage(session[step], session[stage], flow)
else:
            # start or restart the quiz: reset the session to its defaults
session_reset(session, q_id)
            # increment the number of attempts
if ans.try_count > (MAX_ANSWER_COUNT - 1):
raise HTTPError(403)
            # move the previous answers into old_answer
if ans.score_answer:
ans.old_answer.update(ans.score_answer)
ans.score_answer.clear()
ans.order_answer.clear()
            # number of items to answer in the first stage
count = flow.get_level_item_count(1)
            # the first item presented to the user
que = yield get_level_one_item(ans, session, q, count, self.db)
yield self.db.execute(
"UPDATE answer SET has_finished = false, try_count = try_count + 1, score_answer=%s, order_answer=%s, "
"old_answer=%s WHERE id=%s",
(Json(ans.score_answer), Json(ans.order_answer), Json(ans.old_answer), ans.aid)
)
            # total number of items to answer
session[step_count] = flow.total_item_count
yield self.db.execute("UPDATE question SET count = count + 1 WHERE id=%s", (que.id, ))
total_step_count = session[step_count]
current_step = session[step]
current_progress = int((current_step * 1.0 / total_step_count) * 100)
second = q.second
session['q_%s_id' % q_id] = que
yield self.save()
self.render('cat.html', que=que, current_progress=current_progress,
total_step_count=total_step_count, current_step=current_step,
q_id=q_id, second=second)
@gen.coroutine
def post(self, q_id):
session = self.session
q_a = yield self._check_q_exist_n_get_q_a(q_id)
q, ans = q_a
q_type = q.type
que = Que(*session.get('q_%s_id' % q_id))
que_choice = self.get_argument('question')
check_choice = CheckChoice(que_choice, que)
if check_choice.is_valid():
            # save the answer result
value = check_choice.value
session['%s_score' % q_id].append(int(value))
ans.score_answer[str(que.id)]['score'] = value
ans.score_answer[str(que.id)]['choice'] = que_choice
            # build the redirect URL
SelectQuestionClass = getattr(SelectQuestion, q_type)
url = yield SelectQuestionClass(session=session, q=q, que_id=que.id,
ans=ans, db=self.db).get_que_then_redirect()
yield self.save()
self.redirect(url)
else:
            # invalid data: re-render the answer page
current_step = session['%s_step' % q_id]
total_step_count = session['%s_step_count' % q_id]
current_progress = int((current_step * 1.0 / total_step_count) * 100)
second = q.second
self.render('cat.html', que=que, current_progress=current_progress,
total_step_count=total_step_count, current_step=current_step,
q_id=q_id, second=second)
class ResultHandler(BaseHandler):
@gen.coroutine
def _check_result_exist_n_get_q_a(self, q_id):
session_key = self.get_cookie('sessionid')
if not session_key:
raise HTTPError(404)
cursor = yield self.db.execute(
"""
SELECT answer.score_answer, answer.order_answer, answer.has_finished from answer
INNER JOIN questionnaire ON answer.questionnaire_id = questionnaire.id
WHERE answer.questionnaire_id=%s
AND answer.session_key=%s;
""", (q_id, session_key)
)
# q_a的意思是questionnaire and answer
        # q_a means "questionnaire and answer"
if (not q_a) or (not q_a.has_finished):
raise HTTPError(404)
else:
raise gen.Return(q_a)
@gen.coroutine
def get(self, q_id):
q_a = yield self._check_result_exist_n_get_q_a(q_id)
self.render('result.html', q_a=q_a, q_id=q_id)
if __name__ == "__main__":
parse_command_line()
ioloop = IOLoop.instance()
application = Application([
(r"/", QuestionnaireListHandler),
(r"/cat/(\d+)", QuestionHandler),
(r"/result/(\d+)", ResultHandler)
],
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
cookie_secret=COOKIE_SECRET,
debug=True,
xsrf_cookies=True,
)
application.db = momoko.Pool(
dsn=DSN,
size=1,
ioloop=ioloop,
cursor_factory=NamedTupleCursor,
)
future = application.db.connect()
ioloop.add_future(future, lambda f: ioloop.stop())
ioloop.start()
future.result()
http_server = HTTPServer(application)
http_server.listen(8000, 'localhost')
ioloop.start()
| mit |
leishiran/shadowsocks-1 | setup.py | 929 | 1321 | import codecs
from setuptools import setup
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name="shadowsocks",
version="2.8.2",
license='http://www.apache.org/licenses/LICENSE-2.0',
description="A fast tunnel proxy that help you get through firewalls",
author='clowwindy',
author_email='[email protected]',
url='https://github.com/shadowsocks/shadowsocks',
packages=['shadowsocks', 'shadowsocks.crypto'],
package_data={
'shadowsocks': ['README.rst', 'LICENSE']
},
install_requires=[],
entry_points="""
[console_scripts]
sslocal = shadowsocks.local:main
ssserver = shadowsocks.server:main
""",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: Proxy Servers',
],
long_description=long_description,
)
| apache-2.0 |
metaron-uk/xbmc | lib/libUPnP/Neptune/Extras/Tools/Logging/NeptuneLogConsoleMulticast.py | 202 | 3159 | #!/usr/bin/env python
from struct import *
from socket import *
from optparse import OptionParser
UDP_ADDR = "0.0.0.0"
UDP_MULTICAST_ADDR = "239.255.255.100"
UDP_PORT = 7724
BUFFER_SIZE = 65536
#HEADER_KEYS = ['Logger', 'Level', 'Source-File', 'Source-Function', 'Source-Line', 'TimeStamp']
HEADER_KEYS = {
    'mini': ('Level',),
'standard': ('Logger', 'Level', 'Source-Function'),
'long': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function'),
'all': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function', 'TimeStamp'),
'custom': ()
}
Senders = {}
class LogRecord:
def __init__(self, data):
offset = 0
self.headers = {}
for line in data.split("\r\n"):
offset += len(line)+2
if ':' not in line: break
key,value=line.split(":",1)
self.headers[key] = value.strip()
self.body = data[offset:]
def __getitem__(self, index):
return self.headers[index]
def format(self, sender_index, keys):
parts = ['['+str(sender_index)+']']
if 'Level' in keys:
parts.append('['+self.headers['Level']+']')
if 'Logger' in keys:
parts.append(self.headers['Logger'])
if 'TimeStamp' in keys:
parts.append(self.headers['TimeStamp'])
if 'Source-File' in keys:
if 'Source-Line' in keys:
parts.append(self.headers['Source-File']+':'+self.headers['Source-Line'])
else:
parts.append(self.headers['Source-File'])
if 'Source-Function' in keys:
parts.append(self.headers['Source-Function'])
parts.append(self.body)
return ' '.join(parts)
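# Hedged illustration (invented datagram, not from the Neptune sources): each
# UDP packet is expected to carry a MIME-like header block terminated by the
# first line without a colon, followed by the log message body, e.g.
#
#   Logger: my.logger\r\n
#   Level: INFO\r\n
#   Source-Function: main\r\n
#   \r\n
#   something happened
#
# LogRecord then exposes the headers via record['Level'] etc. and the rest as
# record.body.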
class Listener:
def __init__(self, format='standard', port=UDP_PORT):
self.socket = socket(AF_INET,SOCK_DGRAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
mreq = pack("4sl", inet_aton(UDP_MULTICAST_ADDR), INADDR_ANY)
self.socket.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
self.socket.bind((UDP_ADDR, port))
self.format_keys = HEADER_KEYS[format]
def listen(self):
while True:
data,addr = self.socket.recvfrom(BUFFER_SIZE)
sender_index = len(Senders.keys())
if addr in Senders:
sender_index = Senders[addr]
else:
print "### NEW SENDER:", addr
Senders[addr] = sender_index
record = LogRecord(data)
print record.format(sender_index, self.format_keys)
### main
parser = OptionParser(usage="%prog [options]")
parser.add_option("-p", "--port", dest="port", help="port number to listen on", type="int", default=UDP_PORT)
parser.add_option("-f", "--format", dest="format", help="log format (mini, standard, long, or all)", choices=('mini', 'standard', 'long', 'all'), default='standard')
(options, args) = parser.parse_args()
print "Listening on port", options.port
l = Listener(format=options.format, port=options.port)
l.listen()
| gpl-2.0 |
MagicSolutions/django-cms | cms/migrations/0001_initial.py | 15 | 30501 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Placeholder'
db.create_table('cms_placeholder', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slot', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('default_width', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True)),
))
db.send_create_signal('cms', ['Placeholder'])
# Adding model 'CMSPlugin'
db.create_table('cms_cmsplugin', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('placeholder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.CMSPlugin'], null=True, blank=True)),
('position', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('plugin_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('changed_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('cms', ['CMSPlugin'])
# Adding model 'Page'
db.create_table('cms_page', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_by', self.gf('django.db.models.fields.CharField')(max_length=70)),
('changed_by', self.gf('django.db.models.fields.CharField')(max_length=70)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['cms.Page'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('publication_date', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('publication_end_date', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('in_navigation', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('soft_root', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('reverse_id', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=40, null=True, blank=True)),
('navigation_extenders', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=80, null=True, blank=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
('template', self.gf('django.db.models.fields.CharField')(max_length=100)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('moderator_state', self.gf('django.db.models.fields.SmallIntegerField')(default=1, blank=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('login_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('limit_visibility_in_menu', self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, db_index=True, blank=True)),
('publisher_is_draft', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
('publisher_public', self.gf('django.db.models.fields.related.OneToOneField')(related_name='publisher_draft', unique=True, null=True, to=orm['cms.Page'])),
('publisher_state', self.gf('django.db.models.fields.SmallIntegerField')(default=0, db_index=True)),
))
db.send_create_signal('cms', ['Page'])
# Adding M2M table for field placeholders on 'Page'
db.create_table('cms_page_placeholders', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('page', models.ForeignKey(orm['cms.page'], null=False)),
('placeholder', models.ForeignKey(orm['cms.placeholder'], null=False))
))
db.create_unique('cms_page_placeholders', ['page_id', 'placeholder_id'])
# Adding model 'PageModerator'
db.create_table('cms_pagemoderator', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label])),
('moderate_page', self.gf('django.db.models.fields.BooleanField')(default=False)),
('moderate_children', self.gf('django.db.models.fields.BooleanField')(default=False)),
('moderate_descendants', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('cms', ['PageModerator'])
# Adding model 'PageModeratorState'
db.create_table('cms_pagemoderatorstate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=3, null=True, blank=True)),
('message', self.gf('django.db.models.fields.TextField')(default='', max_length=1000, blank=True)),
))
db.send_create_signal('cms', ['PageModeratorState'])
# Adding model 'GlobalPagePermission'
db.create_table('cms_globalpagepermission', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('can_change', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_add', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_delete', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_advanced_settings', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_publish', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_permissions', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_move_page', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_moderate', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_view', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_recover_page', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('cms', ['GlobalPagePermission'])
# Adding M2M table for field sites on 'GlobalPagePermission'
db.create_table('cms_globalpagepermission_sites', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('globalpagepermission', models.ForeignKey(orm['cms.globalpagepermission'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('cms_globalpagepermission_sites', ['globalpagepermission_id', 'site_id'])
# Adding model 'PagePermission'
db.create_table('cms_pagepermission', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('can_change', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_add', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_delete', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_advanced_settings', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_publish', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_change_permissions', self.gf('django.db.models.fields.BooleanField')(default=False)),
('can_move_page', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_moderate', self.gf('django.db.models.fields.BooleanField')(default=True)),
('can_view', self.gf('django.db.models.fields.BooleanField')(default=False)),
('grant_on', self.gf('django.db.models.fields.IntegerField')(default=5)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'], null=True, blank=True)),
))
db.send_create_signal('cms', ['PagePermission'])
# Adding model 'PageUser'
db.create_table('cms_pageuser', (
(user_ptr_name, self.gf('django.db.models.fields.related.OneToOneField')(to=orm[user_orm_label], unique=True, primary_key=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_users', to=orm[user_orm_label])),
))
db.send_create_signal('cms', ['PageUser'])
# Adding model 'PageUserGroup'
db.create_table('cms_pageusergroup', (
('group_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.Group'], unique=True, primary_key=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_usergroups', to=orm[user_orm_label])),
))
db.send_create_signal('cms', ['PageUserGroup'])
# Adding model 'Title'
db.create_table('cms_title', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('language', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('menu_title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255)),
('path', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('has_url_overwrite', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('application_urls', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=200, null=True, blank=True)),
('redirect', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('meta_description', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
('meta_keywords', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('page_title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(related_name='title_set', to=orm['cms.Page'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('cms', ['Title'])
# Adding unique constraint on 'Title', fields ['language', 'page']
db.create_unique('cms_title', ['language', 'page_id'])
def backwards(self, orm):
# Removing unique constraint on 'Title', fields ['language', 'page']
db.delete_unique('cms_title', ['language', 'page_id'])
# Deleting model 'Placeholder'
db.delete_table('cms_placeholder')
# Deleting model 'CMSPlugin'
db.delete_table('cms_cmsplugin')
# Deleting model 'Page'
db.delete_table('cms_page')
# Removing M2M table for field placeholders on 'Page'
db.delete_table('cms_page_placeholders')
# Deleting model 'PageModerator'
db.delete_table('cms_pagemoderator')
# Deleting model 'PageModeratorState'
db.delete_table('cms_pagemoderatorstate')
# Deleting model 'GlobalPagePermission'
db.delete_table('cms_globalpagepermission')
# Removing M2M table for field sites on 'GlobalPagePermission'
db.delete_table('cms_globalpagepermission_sites')
# Deleting model 'PagePermission'
db.delete_table('cms_pagepermission')
# Deleting model 'PageUser'
db.delete_table('cms_pageuser')
# Deleting model 'PageUserGroup'
db.delete_table('cms_pageusergroup')
# Deleting model 'Title'
db.delete_table('cms_title')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
2014c2g12/c2g12 | wsgi/exts/w2/static/Brython2.0.0-20140209-164925/Lib/_imp.py | 115 | 2013 | """(Extremely) low-level import machinery bits as used by importlib and imp."""
class __loader__(object):pass
def _fix_co_filename(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:_fix_co_filename'))
def acquire_lock(*args,**kw):
"""acquire_lock() -> None Acquires the interpreter's import lock for the current thread.
This lock should be used by import hooks to ensure thread-safety
when importing modules.
On platforms without threads, this function does nothing."""
raise NotImplementedError("%s:not implemented" % ('_imp.py:acquire_lock'))
def extension_suffixes(*args,**kw):
"""extension_suffixes() -> list of strings Returns the list of file suffixes used to identify extension modules."""
return ['.pyd']
def get_frozen_object(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:get_frozen_object'))
def init_builtin(module,*args,**kw):
return __import__(module)
def init_frozen(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:init_frozen'))
def is_builtin(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_builtin'))
def is_frozen(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen'))
def is_frozen_package(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen_package'))
def load_dynamic(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:load_dynamic'))
def lock_held(*args,**kw):
"""lock_held() -> boolean Return True if the import lock is currently held, else False.
On platforms without threads, return False."""
raise NotImplementedError("%s:not implemented" % ('_imp.py:lock_held'))
def release_lock(*args,**kw):
"""release_lock() -> None Release the interpreter's import lock.
On platforms without threads, this function does nothing."""
raise NotImplementedError("%s:not implemented" % ('_imp.py:release_lock'))
| gpl-2.0 |
iDTLabssl/kitsune | kitsune/announcements/tests/test_models.py | 17 | 2316 | from datetime import datetime, timedelta
from nose.tools import eq_
from kitsune.announcements.models import Announcement
from kitsune.announcements.tests import announcement
from kitsune.sumo.tests import TestCase
from kitsune.users.tests import user, group, profile
from kitsune.wiki.tests import locale
class AnnouncementModelTests(TestCase):
def setUp(self):
super(AnnouncementModelTests, self).setUp()
self.creator = user(save=True)
profile(user=self.creator)
self.group = group(save=True)
self.locale = locale(locale='es', save=True)
self.creator.groups.add(self.group)
def test_active(self):
"""Active announcement shows."""
announcement(show_after=datetime.now() - timedelta(days=2),
show_until=datetime.now() + timedelta(days=2)).save()
eq_(1, Announcement.get_site_wide().count())
def test_always_visible(self):
"""Always visible announcements are shown."""
# This one doesn't show
announcement(show_after=datetime.now() + timedelta(days=2)).save()
announcement(show_after=datetime.now() - timedelta(days=2),
content='stardate 43125').save()
site_wide = Announcement.get_site_wide()
eq_(1, site_wide.count())
eq_('stardate 43125', site_wide[0].content)
def test_group_excluded(self):
"""Announcements in a group are not shown."""
announcement(group=self.group).save()
eq_(0, Announcement.get_site_wide().count())
def test_get_for_group_id(self):
"""If no groups are passed, nothing is returned."""
# Site-wide announcement
announcement().save()
# Announcement in a group.
a = announcement(group=self.group, save=True)
group_ann = Announcement.get_for_group_id(self.group.id)
eq_(1, len(group_ann))
eq_(a, group_ann[0])
def test_get_for_locale_name(self):
"""Announcements for a specific locale are shown."""
# Site-wide announcement
announcement(save=True)
# Announcement in a locale
a = announcement(locale=self.locale, save=True)
locale_ann = Announcement.get_for_locale_name(self.locale.locale)
eq_(1, locale_ann.count())
eq_(a, locale_ann[0])
| bsd-3-clause |
fbradyirl/home-assistant | homeassistant/components/vera/sensor.py | 2 | 3569 | """Support for Vera sensors."""
from datetime import timedelta
import logging
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert
from . import VERA_CONTROLLER, VERA_DEVICES, VeraDevice
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=5)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vera controller devices."""
add_entities(
[
VeraSensor(device, hass.data[VERA_CONTROLLER])
for device in hass.data[VERA_DEVICES]["sensor"]
],
True,
)
class VeraSensor(VeraDevice, Entity):
"""Representation of a Vera Sensor."""
def __init__(self, vera_device, controller):
"""Initialize the sensor."""
self.current_value = None
self._temperature_units = None
self.last_changed_time = None
VeraDevice.__init__(self, vera_device, controller)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def state(self):
"""Return the name of the sensor."""
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
import pyvera as veraApi
if self.vera_device.category == veraApi.CATEGORY_TEMPERATURE_SENSOR:
return self._temperature_units
if self.vera_device.category == veraApi.CATEGORY_LIGHT_SENSOR:
return "lx"
if self.vera_device.category == veraApi.CATEGORY_UV_SENSOR:
return "level"
if self.vera_device.category == veraApi.CATEGORY_HUMIDITY_SENSOR:
return "%"
if self.vera_device.category == veraApi.CATEGORY_POWER_METER:
return "watts"
def update(self):
"""Update the state."""
import pyvera as veraApi
if self.vera_device.category == veraApi.CATEGORY_TEMPERATURE_SENSOR:
self.current_value = self.vera_device.temperature
vera_temp_units = self.vera_device.vera_controller.temperature_units
if vera_temp_units == "F":
self._temperature_units = TEMP_FAHRENHEIT
else:
self._temperature_units = TEMP_CELSIUS
elif self.vera_device.category == veraApi.CATEGORY_LIGHT_SENSOR:
self.current_value = self.vera_device.light
elif self.vera_device.category == veraApi.CATEGORY_UV_SENSOR:
self.current_value = self.vera_device.light
elif self.vera_device.category == veraApi.CATEGORY_HUMIDITY_SENSOR:
self.current_value = self.vera_device.humidity
elif self.vera_device.category == veraApi.CATEGORY_SCENE_CONTROLLER:
value = self.vera_device.get_last_scene_id(True)
time = self.vera_device.get_last_scene_time(True)
if time == self.last_changed_time:
self.current_value = None
else:
self.current_value = value
self.last_changed_time = time
elif self.vera_device.category == veraApi.CATEGORY_POWER_METER:
power = convert(self.vera_device.power, float, 0)
self.current_value = int(round(power, 0))
elif self.vera_device.is_trippable:
tripped = self.vera_device.is_tripped
self.current_value = "Tripped" if tripped else "Not Tripped"
else:
self.current_value = "Unknown"
| apache-2.0 |
Dfelker/ansible | v1/ansible/runner/action_plugins/unarchive.py | 109 | 5211 | # (c) 2012, Michael DeHaan <[email protected]>
# (c) 2013, Dylan Martin <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from ansible import utils
import ansible.utils.template as template
from ansible import errors
from ansible.runner.return_data import ReturnData
## fixes https://github.com/ansible/ansible/issues/3518
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
import sys
reload(sys)
sys.setdefaultencoding("utf8")
import pipes
class ActionModule(object):
TRANSFERS_FILES = True
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' handler for file transfer operations '''
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
source = options.get('src', None)
dest = options.get('dest', None)
copy = utils.boolean(options.get('copy', 'yes'))
creates = options.get('creates', None)
if source is None or dest is None:
result = dict(failed=True, msg="src (or content) and dest are required")
return ReturnData(conn=conn, result=result)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
module_args_tmp = ""
complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False)
module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
complex_args=complex_args_tmp, delete_remote_tmp=False)
stat = module_return.result.get('stat', None)
if stat and stat.get('exists', False):
return ReturnData(
conn=conn,
comm_ok=True,
result=dict(
changed=False,
msg=("skipped, since %s exists" % creates)
)
)
dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts.
source = template.template(self.runner.basedir, os.path.expanduser(source), inject)
if copy:
if '_original_file' in inject:
source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
else:
source = utils.path_dwim(self.runner.basedir, source)
remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
if remote_checksum == '4':
result = dict(failed=True, msg="python isn't present on the system. Unable to compute checksum")
return ReturnData(conn=conn, result=result)
if remote_checksum != '3':
result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest)
return ReturnData(conn=conn, result=result)
if copy:
# transfer the file to a remote tmp location
tmp_src = tmp + 'source'
conn.put_file(source, tmp_src)
# handle diff mode client side
# handle check mode client side
# fix file permissions when the copy is done as a different user
if copy:
if self.runner.become and self.runner.become_user != 'root':
if not self.runner.noop_on_check(inject):
self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
# Build temporary module_args.
new_module_args = dict(
src=tmp_src,
original_basename=os.path.basename(source),
)
            # make sure check mode is passed on correctly
if self.runner.noop_on_check(inject):
new_module_args['CHECKMODE'] = True
module_args = utils.merge_module_args(module_args, new_module_args)
else:
module_args = "%s original_basename=%s" % (module_args, pipes.quote(os.path.basename(source)))
            # make sure check mode is passed on correctly
if self.runner.noop_on_check(inject):
module_args += " CHECKMODE=True"
return self.runner._execute_module(conn, tmp, 'unarchive', module_args, inject=inject, complex_args=complex_args)
| gpl-3.0 |
hslee16/ansible-modules-extras | system/pam_limits.py | 22 | 8998 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Sebastien Rohaut <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import shutil
import re
import tempfile
DOCUMENTATION = '''
---
module: pam_limits
version_added: "2.0"
author:
- "Sebastien Rohaut (@usawa)"
short_description: Modify Linux PAM limits
description:
  - The M(pam_limits) module modifies PAM limits, by default in /etc/security/limits.conf.
    For the full documentation, see the limits.conf(5) man page.
options:
domain:
description:
- A username, @groupname, wildcard, uid/gid range.
required: true
limit_type:
description:
- Limit type, see C(man limits) for an explanation
required: true
choices: [ "hard", "soft", "-" ]
limit_item:
description:
- The limit to be set
required: true
choices: [ "core", "data", "fsize", "memlock", "nofile", "rss", "stack", "cpu", "nproc", "as", "maxlogins", "maxsyslogins", "priority", "locks", "sigpending", "msgqueue", "nice", "rtprio", "chroot" ]
value:
description:
- The value of the limit.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
use_min:
description:
- If set to C(yes), the minimal value will be used or conserved.
        If the specified value is lower than the value in the file, the file content is replaced with the new value,
        otherwise the content is left unmodified.
required: false
choices: [ "yes", "no" ]
default: "no"
use_max:
description:
- If set to C(yes), the maximal value will be used or conserved.
        If the specified value is higher than the value in the file, the file content is replaced with the new value,
        otherwise the content is left unmodified.
required: false
choices: [ "yes", "no" ]
default: "no"
dest:
description:
      - Path to the limits.conf file to modify.
required: false
default: "/etc/security/limits.conf"
comment:
description:
- Comment associated with the limit.
required: false
default: ''
'''
EXAMPLES = '''
# Add or modify nofile soft limit for the user joe
- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000
# Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
- pam_limits: domain=smith limit_type=hard limit_item=fsize value=1000000 use_max=yes
# Add or modify memlock, both soft and hard, limit for the user james with a comment.
- pam_limits: domain=james limit_type=- limit_item=memlock value=unlimited comment="unlimited memory lock for james"
'''
def main():
pam_items = [ 'core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot' ]
pam_types = [ 'soft', 'hard', '-' ]
limits_conf = '/etc/security/limits.conf'
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
domain = dict(required=True, type='str'),
limit_type = dict(required=True, type='str', choices=pam_types),
limit_item = dict(required=True, type='str', choices=pam_items),
value = dict(required=True, type='str'),
use_max = dict(default=False, type='bool'),
use_min = dict(default=False, type='bool'),
backup = dict(default=False, type='bool'),
dest = dict(default=limits_conf, type='str'),
comment = dict(required=False, default='', type='str')
)
)
domain = module.params['domain']
limit_type = module.params['limit_type']
limit_item = module.params['limit_item']
value = module.params['value']
use_max = module.params['use_max']
use_min = module.params['use_min']
backup = module.params['backup']
limits_conf = module.params['dest']
new_comment = module.params['comment']
changed = False
if os.path.isfile(limits_conf):
if not os.access(limits_conf, os.W_OK):
module.fail_json(msg="%s is not writable. Use sudo" % (limits_conf) )
else:
module.fail_json(msg="%s is not visible (check presence, access rights, use sudo)" % (limits_conf) )
if use_max and use_min:
module.fail_json(msg="Cannot use use_min and use_max at the same time." )
if not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
module.fail_json(msg="Argument 'value' can be one of 'unlimited', 'infinity', '-1' or positive number. Refer to manual pages for more details.")
# Backup
if backup:
backup_file = module.backup_local(limits_conf)
space_pattern = re.compile(r'\s+')
message = ''
f = open (limits_conf, 'r')
# Tempfile
nf = tempfile.NamedTemporaryFile(delete = False)
found = False
new_value = value
for line in f:
if line.startswith('#'):
nf.write(line)
continue
newline = re.sub(space_pattern, ' ', line).strip()
if not newline:
nf.write(line)
continue
# Remove comment in line
newline = newline.split('#',1)[0]
try:
old_comment = line.split('#',1)[1]
except:
old_comment = ''
newline = newline.rstrip()
if not new_comment:
new_comment = old_comment
if new_comment:
new_comment = "\t#"+new_comment
line_fields = newline.split(' ')
if len(line_fields) != 4:
nf.write(line)
continue
line_domain = line_fields[0]
line_type = line_fields[1]
line_item = line_fields[2]
actual_value = line_fields[3]
if not (actual_value in ['unlimited', 'infinity', '-1'] or actual_value.isdigit()):
module.fail_json(msg="Invalid configuration of '%s'. Current value of %s is unsupported." % (limits_conf, line_item))
# Found the line
if line_domain == domain and line_type == limit_type and line_item == limit_item:
found = True
if value == actual_value:
message = line
nf.write(line)
continue
actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
value_unlimited = value in ['unlimited', 'infinity', '-1']
if use_max:
if value.isdigit() and actual_value.isdigit():
new_value = max(int(value), int(actual_value))
elif actual_value_unlimited:
new_value = actual_value
else:
new_value = value
if use_min:
if value.isdigit() and actual_value.isdigit():
new_value = min(int(value), int(actual_value))
elif value_unlimited:
new_value = actual_value
else:
new_value = value
# Change line only if value has changed
if new_value != actual_value:
changed = True
new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n"
message = new_limit
nf.write(new_limit)
else:
message = line
nf.write(line)
else:
nf.write(line)
if not found:
changed = True
new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n"
message = new_limit
nf.write(new_limit)
f.close()
nf.flush()
# Copy tempfile to newfile
module.atomic_move(nf.name, f.name)
try:
nf.close()
except:
pass
res_args = dict(
changed = changed, msg = message
)
if backup:
res_args['backup_file'] = backup_file
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
open-synergy/reporting-engine | report_qweb_element_page_visibility/__openerp__.py | 9 | 1825 | # -*- coding: utf-8 -*-
#########################################################################
# #
# Copyright (C) 2015 Agile Business Group #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details.                  #
# #
# You should have received a copy of the #
# GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#########################################################################
{
'name': 'Report Qweb Element Page Visibility',
'version': '8.0.1.0.0',
'author': 'Agile Business Group, Odoo Community Association (OCA)',
'category': 'Tools',
"website": "https://odoo-community.org/",
"license": "AGPL-3",
"application": False,
"installable": True,
'data': [
'views/layouts.xml',
],
'depends': [
'report',
],
}
| agpl-3.0 |
mingwpy/scipy | scipy/sparse/linalg/matfuncs.py | 62 | 25583 | """
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
from __future__ import division, print_function, absolute_import
__all__ = ['expm', 'inv']
import math
import numpy as np
import scipy.misc
from scipy.linalg.basic import solve, solve_triangular
from scipy.sparse.base import isspmatrix
from scipy.sparse.construct import eye as speye
from scipy.sparse.linalg import spsolve
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg.interface import LinearOperator
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
"""
Compute the inverse of a sparse matrix
Parameters
----------
A : (M,M) ndarray or sparse matrix
square matrix to be inverted
Returns
-------
Ainv : (M,M) ndarray or sparse matrix
inverse of `A`
Notes
-----
This computes the sparse inverse of `A`. If the inverse of `A` is expected
to be non-sparse, it will likely be faster to convert `A` to dense and use
scipy.linalg.inv.
.. versionadded:: 0.12.0
"""
I = speye(A.shape[0], A.shape[1], dtype=A.dtype, format=A.format)
Ainv = spsolve(A, I)
return Ainv
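# Illustrative usage sketch (added for clarity, not part of the original module):
# assumes a small CSC matrix; `inv` returns a sparse inverse in the same format.
#
#     from scipy.sparse import csc_matrix
#     from scipy.sparse.linalg import inv
#     A = csc_matrix([[1., 0.], [1., 2.]])
#     Ainv = inv(A)                 # sparse 2x2 inverse
#     A.dot(Ainv).toarray()         # approximately the 2x2 identity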
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
for i in range(p):
v = M.dot(v)
return max(v)
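# Worked example (illustrative, not from the original source): for the non-negative
# matrix A = [[1, 1], [0, 1]] and p = 2, A**2 = [[1, 2], [0, 1]], whose 1-norm
# (maximum absolute column sum) is 3.  The loop above reaches the same value by
# propagating a ones vector through A.T twice: [1, 1] -> [1, 2] -> [1, 3].
#
#     _onenorm_matrix_power_nnm(np.array([[1., 1.], [0., 1.]]), 2)   # -> 3 (as a one-element array)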
def _onenorm(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _ident_like(A):
# A compatibility function which should eventually disappear.
# This is copypasted from expm_action.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def _count_nonzero(A):
# A compatibility function which should eventually disappear.
#XXX There should be a better way to do this when A is sparse
# in the traditional sense.
if isspmatrix(A):
return np.sum(A.toarray() != 0)
else:
return np.count_nonzero(A)
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if isspmatrix(A):
lower_part = scipy.sparse.tril(A, -1)
if lower_part.nnz == 0:
# structural upper triangularity
return True
else:
# coincidental upper triangularity
return _count_nonzero(lower_part) == 0
else:
return _count_nonzero(np.tril(A, -1)) == 0
def _smart_matrix_product(A, B, alpha=None, structure=None):
"""
A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B.
"""
if len(A.shape) != 2:
raise ValueError('expected A to be a rectangular matrix')
if len(B.shape) != 2:
raise ValueError('expected B to be a rectangular matrix')
f = None
if structure == UPPER_TRIANGULAR:
if not isspmatrix(A) and not isspmatrix(B):
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
if f is not None:
if alpha is None:
alpha = 1.
out = f(alpha, A, B)
else:
if alpha is None:
out = A.dot(B)
else:
out = alpha * A.dot(B)
return out
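# Illustrative sketch (an assumption, not from the original source): when both
# operands are dense and declared upper triangular, the BLAS ``trmm`` routine is
# used instead of a general product, e.g.
#
#     A = np.triu(np.ones((3, 3)))
#     B = np.triu(np.ones((3, 3)))
#     _smart_matrix_product(A, B, structure=UPPER_TRIANGULAR)   # equals A.dot(B)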
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p, structure=None):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self._structure = structure
self.dtype = A.dtype
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def _rmatvec(self, x):
A_T = self._A.T
x = x.ravel()
for i in range(self._p):
x = A_T.dot(x)
return x
def _matmat(self, X):
for i in range(self._p):
X = _smart_matrix_product(self._A, X, structure=self._structure)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
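# Illustrative sketch (an assumption, not from the original source): the operator
# applies A repeatedly instead of forming A**p explicitly, e.g.
#
#     op = MatrixPowerOperator(np.diag([2., 3.]), 2)
#     op.matvec(np.ones(2))   # -> array([ 4.,  9.])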
class ProductOperator(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args, **kwargs):
self._structure = kwargs.get('structure', None)
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self.dtype = np.find_common_type([x.dtype for x in args], [])
self._operator_sequence = args
def _matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def _rmatvec(self, x):
x = x.ravel()
for A in self._operator_sequence:
x = A.T.dot(x)
return x
def _matmat(self, X):
for A in reversed(self._operator_sequence):
X = _smart_matrix_product(A, X, structure=self._structure)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper(object):
"""
Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A, structure=None, use_exact_onenorm=False):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
structure : str, optional
A string describing the structure of matrix `A`.
Only `upper_triangular` is currently supported.
use_exact_onenorm : bool, optional
If True then only the exact one-norm of matrix powers and products
will be used. Otherwise, the one-norm of powers and products
may initially be estimated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
self.structure = structure
self.use_exact_onenorm = use_exact_onenorm
@property
def A2(self):
if self._A2 is None:
self._A2 = _smart_matrix_product(
self.A, self.A, structure=self.structure)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = _smart_matrix_product(
self.A2, self.A2, structure=self.structure)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = _smart_matrix_product(
self.A4, self.A2, structure=self.structure)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = _smart_matrix_product(
self.A6, self.A2, structure=self.structure)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = _smart_matrix_product(
self.A4, self.A6, structure=self.structure)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
if self.use_exact_onenorm:
return self.d4_tight
if self._d4_exact is not None:
return self._d4_exact
else:
if self._d4_approx is None:
self._d4_approx = _onenormest_matrix_power(self.A2, 2,
structure=self.structure)**(1/4.)
return self._d4_approx
@property
def d6_loose(self):
if self.use_exact_onenorm:
return self.d6_tight
if self._d6_exact is not None:
return self._d6_exact
else:
if self._d6_approx is None:
self._d6_approx = _onenormest_matrix_power(self.A2, 3,
structure=self.structure)**(1/6.)
return self._d6_approx
@property
def d8_loose(self):
if self.use_exact_onenorm:
return self.d8_tight
if self._d8_exact is not None:
return self._d8_exact
else:
if self._d8_approx is None:
self._d8_approx = _onenormest_matrix_power(self.A4, 2,
structure=self.structure)**(1/8.)
return self._d8_approx
@property
def d10_loose(self):
if self.use_exact_onenorm:
return self.d10_tight
if self._d10_exact is not None:
return self._d10_exact
else:
if self._d10_approx is None:
self._d10_approx = _onenormest_product((self.A4, self.A6),
structure=self.structure)**(1/10.)
return self._d10_approx
def pade3(self):
b = (120., 60., 12., 1.)
U = _smart_matrix_product(self.A,
b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = _smart_matrix_product(self.A,
b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7(self):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
U = _smart_matrix_product(self.A,
b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade9(self):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
U = _smart_matrix_product(self.A,
(b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
b[3]*self.A2 + b[1]*self.ident),
structure=self.structure)
V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
b[2]*self.A2 + b[0]*self.ident)
return U, V
def pade13_scaled(self, s):
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800., 960960.,
16380., 182., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U2 = _smart_matrix_product(B6,
b[13]*B6 + b[11]*B4 + b[9]*B2,
structure=self.structure)
U = _smart_matrix_product(B,
(U2 + b[7]*B6 + b[5]*B4 +
b[3]*B2 + b[1]*self.ident),
structure=self.structure)
V2 = _smart_matrix_product(B6,
b[12]*B6 + b[10]*B4 + b[8]*B2,
structure=self.structure)
V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
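# Illustrative sketch (an assumption, not from the original source): matrix powers
# are computed once and cached on first access, e.g.
#
#     h = _ExpmPadeHelper(np.eye(3))
#     h.A2 is h.A2   # True; the second access reuses the cached product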
def expm(A):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
"""
return _expm(A, use_exact_onenorm='auto')
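# Illustrative usage sketch (added for clarity, not part of the original source):
# for a diagonal matrix the matrix exponential is the elementwise exponential of
# the diagonal, which makes a cheap sanity check.
#
#     from scipy.sparse import csc_matrix
#     from scipy.sparse.linalg import expm
#     A = csc_matrix([[1., 0.], [0., 2.]])
#     expm(A).toarray()   # approximately [[e, 0], [0, e**2]]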
def _expm(A, use_exact_onenorm):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if isinstance(A, (list, tuple)):
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# Detect upper triangularity.
structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
if use_exact_onenorm == "auto":
# Hardcode a matrix order threshold for exact vs. estimated one-norms.
use_exact_onenorm = A.shape[0] < 200
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(
A, structure=structure, use_exact_onenorm=use_exact_onenorm)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V, structure=structure)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V, structure=structure)
# Try Pade orders 7 and 9.
eta_3 = max(h.d6_tight, h.d8_loose)
if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
U, V = h.pade7()
return _solve_P_Q(U, V, structure=structure)
if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
U, V = h.pade9()
return _solve_P_Q(U, V, structure=structure)
# Use Pade order 13.
eta_4 = max(h.d8_loose, h.d10_loose)
eta_5 = min(eta_3, eta_4)
theta_13 = 4.25
s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
s = s + _ell(2**-s * h.A, 13)
U, V = h.pade13_scaled(s)
X = _solve_P_Q(U, V, structure=structure)
if structure == UPPER_TRIANGULAR:
# Invoke Code Fragment 2.1.
X = _fragment_2_1(X, h.A, s)
else:
# X = r_13(A)^(2^s) by repeated squaring.
for i in range(s):
X = X.dot(X)
return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if isspmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
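# Illustrative note (an assumption, consistent with the code above): for the Pade
# approximant r_m(A) = p_m(A) / q_m(A), U collects the odd-degree terms and V the
# even-degree terms, so p_m(A) = U + V and q_m(A) = -U + V, and the returned X
# solves q_m(A) X = p_m(A).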
def _sinch(x):
"""
Stably evaluate sinch.
Notes
-----
The strategy of falling back to a sixth order Taylor expansion
was suggested by the Spallation Neutron Source docs
which was found on the internet by google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
x2 = x*x
if abs(x) < 0.0135:
return 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
else:
return np.sinh(x) / x
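# Worked check (illustrative): at x = 0 the Taylor branch returns exactly 1,
# matching the limit sinh(x)/x -> 1, while for |x| >= 0.0135 the direct formula is
# used, e.g. _sinch(1.0) == np.sinh(1.0) / 1.0 (about 1.1752).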
def _eq_10_42(lam_1, lam_2, t_12):
"""
Equation (10.42) of Functions of Matrices: Theory and Computation.
Notes
-----
This is a helper function for _fragment_2_1 of expm_2009.
Equation (10.42) is on page 251 in the section on Schur algorithms.
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
    expm([[lam_1, t_12], [0, lam_2]])
    =
    [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
    [0, exp(lam_2)]]
"""
    # The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
# apparently suffers from cancellation, according to Higham's textbook.
# A nice implementation of sinch, defined as sinh(x)/x,
# will apparently work around the cancellation.
a = 0.5 * (lam_1 + lam_2)
b = 0.5 * (lam_1 - lam_2)
return t_12 * np.exp(a) * _sinch(b)
def _fragment_2_1(X, T, s):
"""
A helper function for expm_2009.
Notes
-----
The argument X is modified in-place, but this modification is not the same
as the returned value of the function.
This function also takes pains to do things in ways that are compatible
with sparse matrices, for example by avoiding fancy indexing
and by using methods of the matrices whenever possible instead of
using functions of the numpy or scipy libraries themselves.
"""
# Form X = r_m(2^-s T)
# Replace diag(X) by exp(2^-s diag(T)).
n = X.shape[0]
diag_T = T.diagonal().copy()
# Replace diag(X) by exp(2^-s diag(T)).
scale = 2 ** -s
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
for i in range(s-1, -1, -1):
X = X.dot(X)
# Replace diag(X) by exp(2^-i diag(T)).
scale = 2 ** -i
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
# Replace (first) superdiagonal of X by explicit formula
# for superdiagonal of exp(2^-i T) from Eq (10.42) of
# the author's 2008 textbook
# Functions of Matrices: Theory and Computation.
for k in range(n-1):
lam_1 = scale * diag_T[k]
lam_2 = scale * diag_T[k+1]
t_12 = scale * T[k, k+1]
value = _eq_10_42(lam_1, lam_2, t_12)
X[k, k+1] = value
# Return the updated X matrix.
return X
def _ell(A, m):
"""
A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose norm of power we care about.
m : int
The power of the linear operator
Returns
-------
value : int
A value related to a bound.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
p = 2*m + 1
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
choose_2p_p = scipy.misc.comb(2*p, p, exact=True)
abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))
# This is explained after Eq. (1.2) of the 2009 expm paper.
# It is the "unit roundoff" of IEEE double precision arithmetic.
u = 2**-53
# Compute the one-norm of matrix power p of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
log2_alpha_div_u = np.log2(alpha/u)
value = int(np.ceil(log2_alpha_div_u / (2 * m)))
return max(value, 0)
| bsd-3-clause |
bmanojlovic/ansible | lib/ansible/modules/windows/win_package.py | 9 | 5063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = r'''
---
module: win_package
version_added: "1.7"
author: Trond Hindenes
short_description: Installs/Uninstalls an installable package, either from local file system or url
description:
- Installs or uninstalls a package.
- 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)'
options:
path:
description:
- Location of the package to be installed (either on file system, network share or url)
required: true
name:
description:
- Name of the package, if name isn't specified the path will be used for log messages
required: false
default: null
product_id:
description:
- Product id of the installed package (used for checking if already installed)
      - You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)
required: true
aliases: [productid]
arguments:
description:
- Any arguments the installer needs
default: null
required: false
state:
description:
- Install or Uninstall
choices:
- present
- absent
default: present
required: false
aliases: [ensure]
user_name:
description:
- Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
default: null
required: false
user_password:
description:
- Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
default: null
required: false
expected_return_code:
description:
- One or more return codes from the package installation that indicates success.
- If not provided, defaults to 0
required: no
default: 0
'''
EXAMPLES = r'''
- name: Install the Visual C thingy
win_package:
name: Microsoft Visual C thingy
path: http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe
product_id: '{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}'
arguments: /install /passive /norestart
- name: Install Remote Desktop Connection Manager from msi
win_package:
path: https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi
product_id: '{0240359E-6A4C-4884-9E94-B397A02D893C}'
- name: Uninstall Remote Desktop Connection Manager installed from msi
win_package:
path: https://download.microsoft.com/download/A/F/0/AF0071F3-B198-4A35-AA90-C68D103BDCCF/rdcman.msi
product_id: '{0240359E-6A4C-4884-9E94-B397A02D893C}'
state: absent
# Specify the expected non-zero return code when successful
# In this case 3010 indicates 'reboot required'
- name: 'Microsoft .NET Framework 4.5.1'
win_package:
path: https://download.microsoft.com/download/1/6/7/167F0D79-9317-48AE-AEDB-17120579F8E2/NDP451-KB2858728-x86-x64-AllOS-ENU.exe
productid: '{7DEBE4EB-6B40-3766-BB35-5CBBC385DA37}'
arguments: '/q /norestart'
ensure: present
expected_return_code: 3010
# Specify multiple non-zero return codes when successful
# In this case we can say that both 0 (SUCCESSFUL) and 3010 (REBOOT REQUIRED) codes are acceptable
- name: 'Microsoft .NET Framework 4.5.1'
win_package:
path: https://download.microsoft.com/download/1/6/7/167F0D79-9317-48AE-AEDB-17120579F8E2/NDP451-KB2858728-x86-x64-AllOS-ENU.exe
productid: '{7DEBE4EB-6B40-3766-BB35-5CBBC385DA37}'
arguments: '/q /norestart'
ensure: present
expected_return_code: [0,3010]
'''
| gpl-3.0 |
TangentMicroServices/ApplicationService | templates/settings.py | 2 | 3541 | """
Django settings for the {{repo_name|lower}} project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*_ac&t31acs3_)@+636p(xuu!9i&n5)tw#y)5a2xjfajiwubk5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
## 3rd party
'rest_framework',
'rest_framework_swagger',
## custom
'tokenauth',
'api',
# testing etc:
'django_jenkins',
'django_extensions',
'corsheaders',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'tokenauth.middleware.TokenAuthMiddleware',
)
ROOT_URLCONF = '{{repo_name|lower}}.urls'
WSGI_APPLICATION = '{{repo_name|lower}}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
###
# App Specific configurations
###
# CUSTOM AUTH
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'tokenauth.authbackends.TokenAuthBackend'
)
## REST
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'tokenauth.authbackends.RESTTokenAuthBackend',
)
}
JENKINS_TASKS = (
'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.with_coverage',
# 'django_jenkins.tasks.run_sloccount',
# 'django_jenkins.tasks.run_graphmodels'
)
PROJECT_APPS = (
'api',
)
SWAGGER_SETTINGS = {
'api_key': 'fb5df470df0fa3727c49a61608996618d0954289',
'info': {
'contact': '[email protected]',
'description': '...',
'license': 'MIT',
'title': '{{repo_name}}',
},
}
STATIC_ROOT = '/srv/static/{{repo_name|lower}}/static/'
# Services:
# (will be overwritten at deploy time)
# format: {{service_name|upper}}_BASE_URL
## Service base urls without a trailing slash:
USERSERVICE_BASE_URL = 'http://staging.userservice.tangentme.com'
| mit |