repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses 15 values)
---|---|---|---|---|---
jonyroda97/redbot-amigosprovaveis | lib/youtube_dl/extractor/sevenplus.py | 24 | 2923 | # coding: utf-8
from __future__ import unicode_literals
import re
from .brightcove import BrightcoveNewIE
from ..compat import compat_str
from ..utils import (
try_get,
update_url_query,
)
class SevenPlusIE(BrightcoveNewIE):
IE_NAME = '7plus'
_VALID_URL = r'https?://(?:www\.)?7plus\.com\.au/(?P<path>[^?]+\?.*?\bepisode-id=(?P<id>[^&#]+))'
_TESTS = [{
'url': 'https://7plus.com.au/MTYS?episode-id=MTYS7-003',
'info_dict': {
'id': 'MTYS7-003',
'ext': 'mp4',
'title': 'S7 E3 - Wind Surf',
'description': 'md5:29c6a69f21accda7601278f81b46483d',
'uploader_id': '5303576322001',
'upload_date': '20171201',
'timestamp': 1512106377,
'series': 'Mighty Ships',
'season_number': 7,
'episode_number': 3,
'episode': 'Wind Surf',
},
'params': {
'format': 'bestvideo',
'skip_download': True,
}
}, {
'url': 'https://7plus.com.au/UUUU?episode-id=AUMS43-001',
'only_matching': True,
}]
def _real_extract(self, url):
path, episode_id = re.match(self._VALID_URL, url).groups()
media = self._download_json(
'https://videoservice.swm.digital/playback', episode_id, query={
'appId': '7plus',
'deviceType': 'web',
'platformType': 'web',
'accountId': 5303576322001,
'referenceId': 'ref:' + episode_id,
'deliveryId': 'csai',
'videoType': 'vod',
})['media']
for source in media.get('sources', {}):
src = source.get('src')
if not src:
continue
source['src'] = update_url_query(src, {'rule': ''})
info = self._parse_brightcove_metadata(media, episode_id)
content = self._download_json(
'https://component-cdn.swm.digital/content/' + path,
episode_id, headers={
'market-id': 4,
}, fatal=False) or {}
for item in content.get('items', {}):
if item.get('componentData', {}).get('componentType') == 'infoPanel':
for src_key, dst_key in [('title', 'title'), ('shortSynopsis', 'description')]:
value = item.get(src_key)
if value:
info[dst_key] = value
info['series'] = try_get(
item, lambda x: x['seriesLogo']['name'], compat_str)
mobj = re.search(r'^S(\d+)\s+E(\d+)\s+-\s+(.+)$', info['title'])
if mobj:
info.update({
'season_number': int(mobj.group(1)),
'episode_number': int(mobj.group(2)),
'episode': mobj.group(3),
})
return info
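# Illustrative sketch (not part of the original extractor): how the
# _VALID_URL pattern above splits a 7plus URL into the 'path' and 'id'
# groups consumed by _real_extract. The URL is taken from _TESTS; the
# helper name is made up for this example and is never called.
def _example_match_url():
    mobj = re.match(SevenPlusIE._VALID_URL,
                    'https://7plus.com.au/MTYS?episode-id=MTYS7-003')
    # path -> 'MTYS?episode-id=MTYS7-003', id -> 'MTYS7-003'
    return mobj.group('path'), mobj.group('id')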
| gpl-3.0 |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/xml/etree/ElementTree.py | 74 | 57368 | """Lightweight XML support for Python.
XML is an inherently hierarchical data format, and the most natural way to
represent it is with a tree. This module has two classes for this purpose:
1. ElementTree represents the whole XML document as a tree and
2. Element represents a single node in this tree.
Interactions with the whole document (reading and writing to/from files) are
usually done on the ElementTree level. Interactions with a single XML element
and its sub-elements are done on the Element level.
Element is a flexible container object designed to store hierarchical data
structures in memory. It can be described as a cross between a list and a
dictionary. Each Element has a number of properties associated with it:
'tag' - a string containing the element's name.
'attributes' - a Python dictionary storing the element's attributes.
'text' - a string containing the element's text content.
'tail' - an optional string containing text after the element's end tag.
And a number of child elements stored in a Python sequence.
To create an element instance, use the Element constructor,
or the SubElement factory function.
You can also use the ElementTree class to wrap an element structure
and convert it to and from XML.
"""
#---------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
#
# ElementTree
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML", "XMLID",
"XMLParser",
"register_namespace",
]
VERSION = "1.3.0"
import sys
import re
import warnings
import io
import contextlib
from . import ElementPath
class ParseError(SyntaxError):
"""An error when parsing an XML document.
In addition to its exception value, a ParseError contains
two extra attributes:
'code' - the specific exception code
'position' - the line and column of the error
"""
pass
# --------------------------------------------------------------------
def iselement(element):
"""Return True if *element* appears to be an Element."""
return hasattr(element, 'tag')
class Element:
"""An XML element.
This class is the reference implementation of the Element interface.
An element's length is its number of subelements. That means if you
want to check if an element is truly empty, you should check BOTH
its length AND its text attribute.
The element tag, attribute names, and attribute values can be either
bytes or strings.
*tag* is the element name. *attrib* is an optional dictionary containing
element attributes. *extra* are additional element attributes given as
keyword arguments.
Example form:
<tag attrib>text<child/>...</tag>tail
"""
tag = None
"""The element's name."""
attrib = None
"""Dictionary of the element's attributes."""
text = None
"""
Text before first subelement. This is either a string or the value None.
Note that if there is no text, this attribute may be either
None or the empty string, depending on the parser.
"""
tail = None
"""
Text after this element's end tag, but before the next sibling element's
start tag. This is either a string or the value None. Note that if there
was no text, this attribute may be either None or an empty string,
depending on the parser.
"""
def __init__(self, tag, attrib={}, **extra):
if not isinstance(attrib, dict):
raise TypeError("attrib must be dict, not %s" % (
attrib.__class__.__name__,))
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
def makeelement(self, tag, attrib):
"""Create a new element with the same type.
*tag* is a string containing the element name.
*attrib* is a dictionary containing the element attributes.
Do not call this method, use the SubElement factory function instead.
"""
return self.__class__(tag, attrib)
def copy(self):
"""Return copy of current element.
This creates a shallow copy. Subelements will be shared with the
original tree.
"""
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
def __len__(self):
return len(self._children)
def __bool__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
def __getitem__(self, index):
return self._children[index]
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
def __delitem__(self, index):
del self._children[index]
def append(self, subelement):
"""Add *subelement* to the end of this element.
The new element will appear in document order after the last existing
subelement (or directly after the text, if it's the first subelement),
but before the end tag for this element.
"""
self._assert_is_element(subelement)
self._children.append(subelement)
def extend(self, elements):
"""Append subelements from a sequence.
*elements* is a sequence with zero or more elements.
"""
for element in elements:
self._assert_is_element(element)
self._children.extend(elements)
def insert(self, index, subelement):
"""Insert *subelement* at position *index*."""
self._assert_is_element(subelement)
self._children.insert(index, subelement)
def _assert_is_element(self, e):
# Need to refer to the actual Python implementation, not the
# shadowing C implementation.
if not isinstance(e, _Element_Py):
raise TypeError('expected an Element, not %s' % type(e).__name__)
def remove(self, subelement):
"""Remove matching subelement.
Unlike the find methods, this method compares elements based on
identity, NOT ON tag value or contents. To remove subelements by
other means, the easiest way is to use a list comprehension to
select what elements to keep, and then use slice assignment to update
the parent element.
ValueError is raised if a matching element could not be found.
"""
# assert iselement(element)
self._children.remove(subelement)
def getchildren(self):
"""(Deprecated) Return all subelements.
Elements are returned in document order.
"""
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
return ElementPath.find(self, path, namespaces)
def findtext(self, path, default=None, namespaces=None):
"""Find text for first matching element by tag name or path.
*path* is a string having either an element tag or an XPath,
*default* is the value to return if the element was not found,
*namespaces* is an optional mapping from namespace prefix to full name.
Return text content of first matching element, or default value if
none was found. Note that if an element is found having no text
content, the empty string is returned.
"""
return ElementPath.findtext(self, path, default, namespaces)
def findall(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Returns list containing all matching elements in document order.
"""
return ElementPath.findall(self, path, namespaces)
def iterfind(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return an iterable yielding all matching elements in document order.
"""
return ElementPath.iterfind(self, path, namespaces)
def clear(self):
"""Reset element.
This function removes all subelements, clears all attributes, and sets
the text and tail attributes to None.
"""
self.attrib.clear()
self._children = []
self.text = self.tail = None
def get(self, key, default=None):
"""Get element attribute.
Equivalent to attrib.get, but some implementations may handle this a
bit more efficiently. *key* is what attribute to look for, and
*default* is what to return if the attribute was not found.
Returns a string containing the attribute value, or the default if
attribute was not found.
"""
return self.attrib.get(key, default)
def set(self, key, value):
"""Set element attribute.
Equivalent to attrib[key] = value, but some implementations may handle
this a bit more efficiently. *key* is what attribute to set, and
*value* is the attribute value to set it to.
"""
self.attrib[key] = value
def keys(self):
"""Get list of attribute names.
Names are returned in an arbitrary order, just like an ordinary
Python dict. Equivalent to attrib.keys()
"""
return self.attrib.keys()
def items(self):
"""Get element attributes as a sequence.
The attributes are returned in arbitrary order. Equivalent to
attrib.items().
Return a list of (name, value) tuples.
"""
return self.attrib.items()
def iter(self, tag=None):
"""Create tree iterator.
The iterator loops over the element and all subelements in document
order, returning all elements with a matching tag.
If the tree structure is modified during iteration, new or removed
elements may or may not be included. To get a stable set, use the
list() function on the iterator, and loop over the resulting list.
*tag* is what tags to look for (default is to return all elements)
Return an iterator containing all the matching elements.
"""
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
yield from e.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
def itertext(self):
"""Create text iterator.
The iterator loops over the element and all subelements in document
order, returning all inner text.
"""
tag = self.tag
if not isinstance(tag, str) and tag is not None:
return
if self.text:
yield self.text
for e in self:
yield from e.itertext()
if e.tail:
yield e.tail
def SubElement(parent, tag, attrib={}, **extra):
"""Subelement factory which creates an element instance, and appends it
to an existing parent.
The element tag, attribute names, and attribute values can be either
bytes or Unicode strings.
*parent* is the parent element, *tag* is the subelement's name, *attrib* is
an optional dictionary containing element attributes, *extra* are
additional attributes given as keyword arguments.
"""
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
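# Illustrative sketch (not part of the original module): building a small
# tree with Element and SubElement as described in the module docstring.
# The tag and attribute names are arbitrary; the helper is never called.
def _example_build_tree():
    root = Element('catalog', {'version': '1'})
    entry = SubElement(root, 'entry', id='1')
    entry.text = 'hello'
    return root  # <catalog version="1"><entry id="1">hello</entry></catalog>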
def Comment(text=None):
"""Comment element factory.
This function creates a special element which the standard serializer
serializes as an XML comment.
*text* is a string containing the comment string.
"""
element = Element(Comment)
element.text = text
return element
def ProcessingInstruction(target, text=None):
"""Processing Instruction element factory.
This function creates a special element which the standard serializer
serializes as an XML processing instruction.
*target* is a string containing the processing instruction, *text* is a
string containing the processing instruction contents, if any.
"""
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
class QName:
"""Qualified name wrapper.
This class can be used to wrap a QName attribute value in order to get
proper namespace handing on output.
*text_or_uri* is a string containing the QName value either in the form
{uri}local, or if the tag argument is given, the URI part of a QName.
*tag* is an optional argument which if given, will make the first
argument (text_or_uri) be interpreted as a URI, and this argument (tag)
be interpreted as a local name.
"""
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __repr__(self):
return '<QName %r>' % (self.text,)
def __hash__(self):
return hash(self.text)
def __le__(self, other):
if isinstance(other, QName):
return self.text <= other.text
return self.text <= other
def __lt__(self, other):
if isinstance(other, QName):
return self.text < other.text
return self.text < other
def __ge__(self, other):
if isinstance(other, QName):
return self.text >= other.text
return self.text >= other
def __gt__(self, other):
if isinstance(other, QName):
return self.text > other.text
return self.text > other
def __eq__(self, other):
if isinstance(other, QName):
return self.text == other.text
return self.text == other
def __ne__(self, other):
if isinstance(other, QName):
return self.text != other.text
return self.text != other
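# Illustrative sketch (not part of the original module): the two equivalent
# ways of constructing a QName described in the class docstring. The URI is
# a placeholder; the helper is never called.
def _example_qname():
    q1 = QName('{http://example.invalid/ns}item')
    q2 = QName('http://example.invalid/ns', 'item')
    return str(q1) == str(q2)  # True: both normalise to the '{uri}local' form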
# --------------------------------------------------------------------
class ElementTree:
"""An XML element hierarchy.
This class also provides support for serialization to and from
standard XML.
*element* is an optional root element node,
*file* is an optional file handle or file name of an XML file whose
contents will be used to initialize the tree with.
"""
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
def getroot(self):
"""Return root element of this tree."""
return self._root
def _setroot(self, element):
"""Replace root element of this tree.
This will discard the current contents of the tree and replace it
with the given element. Use with care!
"""
# assert iselement(element)
self._root = element
def parse(self, source, parser=None):
"""Load external XML document into element tree.
*source* is a file name or file object, *parser* is an optional parser
instance that defaults to XMLParser.
ParseError is raised if the parser fails to parse the document.
Returns the root element of the given source document.
"""
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if parser is None:
# If no parser was specified, create a default XMLParser
parser = XMLParser()
if hasattr(parser, '_parse_whole'):
# The default XMLParser, when it comes from an accelerator,
# can define an internal _parse_whole API for efficiency.
# It can be used to parse the whole source without feeding
# it with chunks.
self._root = parser._parse_whole(source)
return self._root
while True:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
def iter(self, tag=None):
"""Create and return tree iterator for the root element.
The iterator loops over all elements in this tree, in document order.
*tag* is a string with the tag name to iterate over
(default is to return all elements).
"""
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
def find(self, path, namespaces=None):
"""Find first matching element by tag name or path.
Same as getroot().find(path), which is Element.find()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
def findtext(self, path, default=None, namespaces=None):
"""Find first matching element by tag name or path.
Same as getroot().findtext(path), which is Element.findtext()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return the first matching element, or None if no element was found.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
def findall(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
Same as getroot().findall(path), which is Element.findall().
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return list containing all matching elements in document order.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
def iterfind(self, path, namespaces=None):
"""Find all matching subelements by tag name or path.
Same as getroot().iterfind(path), which is element.iterfind()
*path* is a string having either an element tag or an XPath,
*namespaces* is an optional mapping from namespace prefix to full name.
Return an iterable yielding all matching elements in document order.
"""
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
def write(self, file_or_filename,
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None, *,
short_empty_elements=True):
"""Write element tree to a file as XML.
Arguments:
*file_or_filename* -- file name or a file object opened for writing
*encoding* -- the output encoding (default: US-ASCII)
*xml_declaration* -- bool indicating if an XML declaration should be
added to the output. If None, an XML declaration
is added if encoding IS NOT either of:
US-ASCII, UTF-8, or Unicode
*default_namespace* -- sets the default XML namespace (for "xmlns")
*method* -- either "xml" (default), "html", "text", or "c14n"
*short_empty_elements* -- controls the formatting of elements
that contain no content. If True (default)
they are emitted as a single self-closed
tag, otherwise they are emitted as a pair
of start/end tags
"""
if not method:
method = "xml"
elif method not in _serialize:
raise ValueError("unknown method %r" % method)
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
else:
encoding = encoding.lower()
with _get_writer(file_or_filename, encoding) as write:
if method == "xml" and (xml_declaration or
(xml_declaration is None and
encoding not in ("utf-8", "us-ascii", "unicode"))):
declared_encoding = encoding
if encoding == "unicode":
# Retrieve the default encoding for the xml declaration
import locale
declared_encoding = locale.getpreferredencoding()
write("<?xml version='1.0' encoding='%s'?>\n" % (
declared_encoding,))
if method == "text":
_serialize_text(write, self._root)
else:
qnames, namespaces = _namespaces(self._root, default_namespace)
serialize = _serialize[method]
serialize(write, self._root, qnames, namespaces,
short_empty_elements=short_empty_elements)
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
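# Illustrative sketch (not part of the original module): the two most common
# ways of calling ElementTree.write(). The file name is a placeholder and
# the helper is never called.
def _example_write(tree, filename):
    # With a byte encoding, an XML declaration is only emitted when
    # xml_declaration=True (or the encoding is not us-ascii/utf-8).
    tree.write(filename, encoding='utf-8', xml_declaration=True)
    # encoding='unicode' writes str output, e.g. to an open text stream.
    with open(filename, 'w', encoding='utf-8') as f:
        tree.write(f, encoding='unicode')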
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
# returns text write method and release all resources after using
try:
write = file_or_filename.write
except AttributeError:
# file_or_filename is a file name
if encoding == "unicode":
file = open(file_or_filename, "w")
else:
file = open(file_or_filename, "w", encoding=encoding,
errors="xmlcharrefreplace")
with file:
yield file.write
else:
# file_or_filename is a file-like object
# encoding determines if it is a text or binary writer
if encoding == "unicode":
# use a text writer as is
yield write
else:
# wrap a binary writer with TextIOWrapper
with contextlib.ExitStack() as stack:
if isinstance(file_or_filename, io.BufferedIOBase):
file = file_or_filename
elif isinstance(file_or_filename, io.RawIOBase):
file = io.BufferedWriter(file_or_filename)
# Keep the original file open when the BufferedWriter is
# destroyed
stack.callback(file.detach)
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
file = io.BufferedIOBase()
file.writable = lambda: True
file.write = write
try:
# TextIOWrapper uses these methods to determine
# if BOM (for UTF-16, etc) should be added
file.seekable = file_or_filename.seekable
file.tell = file_or_filename.tell
except AttributeError:
pass
file = io.TextIOWrapper(file,
encoding=encoding,
errors="xmlcharrefreplace",
newline="\n")
# Keep the original file open when the TextIOWrapper is
# destroyed
stack.callback(file.detach)
yield file.write
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
for elem in elem.iter():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces,
short_empty_elements, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem) or not short_empty_elements:
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None,
short_empty_elements=short_empty_elements)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces, **kwargs):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
ltag = tag.lower()
if text:
if ltag == "script" or ltag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
if ltag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
def register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
given prefix or the namespace URI will be removed.
*prefix* is the namespace prefix, *uri* is a namespace uri. Tags and
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(_namespace_map.items()):
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
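# Illustrative sketch (not part of the original module): registering a
# custom prefix so serialisation uses it instead of the generated ns0, ns1,
# ... prefixes. The URI and prefix are placeholders; the helper is never
# called (register_namespace mutates global state).
def _example_register_namespace():
    register_namespace('ex', 'http://example.invalid/ns')
    elem = Element(QName('http://example.invalid/ns', 'item'))
    return tostring(elem, encoding='unicode')  # '<ex:item xmlns:ex="..." />'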
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
# shorter than 500 character, or so. assume that's, by far,
# the most common case in most applications.
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
def tostring(element, encoding=None, method=None, *,
short_empty_elements=True):
"""Generate string representation of XML element.
All subelements are included. If encoding is "unicode", a string
is returned. Otherwise a bytestring is returned.
*element* is an Element instance, *encoding* is an optional output
encoding defaulting to US-ASCII, *method* is an optional output which can
be one of "xml" (default), "html", "text" or "c14n".
Returns an (optionally) encoded string containing the XML data.
"""
stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
ElementTree(element).write(stream, encoding, method=method,
short_empty_elements=short_empty_elements)
return stream.getvalue()
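# Illustrative sketch (not part of the original module): tostring() returns
# str with encoding='unicode' and bytes otherwise. Names are arbitrary and
# the helper is never called.
def _example_tostring():
    root = Element('root')
    SubElement(root, 'child').text = 'data'
    as_text = tostring(root, encoding='unicode')  # '<root><child>data</child></root>'
    as_bytes = tostring(root)                     # the same markup as ASCII bytes
    return as_text, as_bytes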
class _ListDataStream(io.BufferedIOBase):
"""An auxiliary stream accumulating into a list reference."""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None, *,
short_empty_elements=True):
lst = []
stream = _ListDataStream(lst)
ElementTree(element).write(stream, encoding, method=method,
short_empty_elements=short_empty_elements)
return lst
def dump(elem):
"""Write element tree or element structure to sys.stdout.
This function should be used for debugging only.
*elem* is either an ElementTree, or a single Element. The exact output
format is implementation dependent. In this version, it's written as an
ordinary XML file.
"""
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout, encoding="unicode")
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
def parse(source, parser=None):
"""Parse XML document into element tree.
*source* is a filename or file object containing XML data,
*parser* is an optional parser instance defaulting to XMLParser.
Return an ElementTree instance.
"""
tree = ElementTree()
tree.parse(source, parser)
return tree
def iterparse(source, events=None, parser=None):
"""Incrementally parse XML document into ElementTree.
This function also reports what's going on to the user based on the
*events* it is initialized with. The supported events are the strings
"start", "end", "start-ns" and "end-ns" (the "ns" events are used to get
detailed namespace information). If *events* is omitted, only
"end" events are reported.
*source* is a filename or file object containing XML data, *events* is
a list of events to report back, *parser* is an optional parser instance.
Returns an iterator providing (event, elem) pairs.
"""
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
return _IterParseIterator(source, events, parser, close_source)
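# Illustrative sketch (not part of the original module): streaming a large
# document with iterparse() and clearing processed elements to bound memory.
# The file name and the 'record' tag are placeholders; never called here.
def _example_iterparse(filename):
    count = 0
    for event, elem in iterparse(filename, events=('end',)):
        if elem.tag == 'record':
            count += 1
            elem.clear()  # drop children/text we no longer need
    return count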
class XMLPullParser:
def __init__(self, events=None, *, _parser=None):
# The _parser argument is for internal use only and must not be relied
# upon in user code. It will be removed in a future release.
# See http://bugs.python.org/issue17741 for more details.
# _elementtree.c expects a list, not a deque
self._events_queue = []
self._index = 0
self._parser = _parser or XMLParser(target=TreeBuilder())
# wire up the parser for event reporting
if events is None:
events = ("end",)
self._parser._setevents(self._events_queue, events)
def feed(self, data):
"""Feed encoded data to parser."""
if self._parser is None:
raise ValueError("feed() called after end of stream")
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._events_queue.append(exc)
def _close_and_return_root(self):
# iterparse needs this to set its root attribute properly :(
root = self._parser.close()
self._parser = None
return root
def close(self):
"""Finish feeding data to parser.
Unlike XMLParser, does not return the root element. Use
read_events() to consume elements from XMLPullParser.
"""
self._close_and_return_root()
def read_events(self):
"""Return an iterator over currently available (event, elem) pairs.
Events are consumed from the internal event queue as they are
retrieved from the iterator.
"""
events = self._events_queue
while True:
index = self._index
try:
event = events[self._index]
# Avoid retaining references to past events
events[self._index] = None
except IndexError:
break
index += 1
# Compact the list in a O(1) amortized fashion
# As noted above, _elementtree.c needs a list, not a deque
if index * 2 >= len(events):
events[:index] = []
self._index = 0
else:
self._index = index
if isinstance(event, Exception):
raise event
else:
yield event
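# Illustrative sketch (not part of the original module): feeding an
# XMLPullParser incrementally and draining its event queue. The markup is
# arbitrary; the helper is never called.
def _example_pull_parser():
    parser = XMLPullParser(events=('start', 'end'))
    parser.feed('<root><item>1</item>')
    parser.feed('</root>')
    parser.close()
    # -> [('start', 'root'), ('start', 'item'), ('end', 'item'), ('end', 'root')]
    return [(event, elem.tag) for event, elem in parser.read_events()]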
class _IterParseIterator:
def __init__(self, source, events, parser, close_source=False):
# Use the internal, undocumented _parser argument for now; When the
# parser argument of iterparse is removed, this can be killed.
self._parser = XMLPullParser(events=events, _parser=parser)
self._file = source
self._close_file = close_source
self.root = self._root = None
def __next__(self):
while 1:
for event in self._parser.read_events():
return event
if self._parser._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
data = self._file.read(16 * 1024)
if data:
self._parser.feed(data)
else:
self._root = self._parser._close_and_return_root()
def __iter__(self):
return self
def XML(text, parser=None):
"""Parse XML document from string constant.
This function can be used to embed "XML Literals" in Python code.
*text* is a string containing XML data, *parser* is an
optional parser instance, defaulting to the standard XMLParser.
Returns an Element instance.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
def XMLID(text, parser=None):
"""Parse XML document from string constant for its IDs.
*text* is a string containing XML data, *parser* is an
optional parser instance, defaulting to the standard XMLParser.
Returns an (Element, dict) tuple, in which the
dict maps element id:s to elements.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
# Parse XML document from string constant. Alias for XML().
fromstring = XML
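# Illustrative sketch (not part of the original module): parsing markup with
# fromstring() and querying it with findall(). The markup is arbitrary; the
# helper is never called.
def _example_fromstring():
    root = fromstring('<data><item id="1"/><item id="2"/></data>')
    return [item.get('id') for item in root.findall('item')]  # ['1', '2']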
def fromstringlist(sequence, parser=None):
"""Parse XML document from sequence of string fragments.
*sequence* is a list or other sequence of string fragments, *parser* is an optional parser
instance, defaulting to the standard XMLParser.
Returns an Element instance.
"""
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
class TreeBuilder:
"""Generic element structure builder.
This builder converts a sequence of start, data, and end method
calls to a well-formed element structure.
You can use this class to build an element structure using a custom XML
parser, or a parser for some other XML-like format.
*element_factory* is an optional element factory which is called
to create new Element instances, as necessary.
"""
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
def close(self):
"""Flush builder buffers and return toplevel document Element."""
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
def data(self, data):
"""Add text to current element."""
self._data.append(data)
def start(self, tag, attrs):
"""Open new element and return it.
*tag* is the element name, *attrs* is a dict containing element
attributes.
"""
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
def end(self, tag):
"""Close and return current Element.
*tag* is the element name.
"""
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
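# Illustrative sketch (not part of the original module): driving a
# TreeBuilder directly with start/data/end calls, as a custom parser would.
# The tag and text are arbitrary; the helper is never called.
def _example_treebuilder():
    builder = TreeBuilder()
    builder.start('root', {})
    builder.data('payload')
    builder.end('root')
    elem = builder.close()
    return elem.tag, elem.text  # ('root', 'payload')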
# also see ElementTree and TreeBuilder
class XMLParser:
"""Element structure builder for XML source data based on the expat parser.
*html* are predefined HTML entities (not supported currently),
*target* is an optional target object which defaults to an instance of the
standard TreeBuilder class, *encoding* is an optional encoding string
which if given, overrides the encoding specified in the XML file:
http://www.iana.org/assignments/character-sets
"""
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# main callbacks
parser.DefaultHandlerExpand = self._default
if hasattr(target, 'start'):
parser.StartElementHandler = self._start
if hasattr(target, 'end'):
parser.EndElementHandler = self._end
if hasattr(target, 'data'):
parser.CharacterDataHandler = target.data
# miscellaneous callbacks
if hasattr(target, 'comment'):
parser.CommentHandler = target.comment
if hasattr(target, 'pi'):
parser.ProcessingInstructionHandler = target.pi
# Configure pyexpat: buffering, new-style attribute handling.
parser.buffer_text = 1
parser.ordered_attributes = 1
parser.specified_attributes = 1
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _setevents(self, events_queue, events_to_report):
# Internal API for XMLPullParser
# events_to_report: a list of events to report during parsing (same as
# the *events* of XMLPullParser's constructor.
# events_queue: a list of actual parsing events that will be populated
# by the underlying parser.
#
parser = self._parser
append = events_queue.append
for event_name in events_to_report:
if event_name == "start":
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event_name, append=append,
start=self._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event_name == "end":
def handler(tag, event=event_name, append=append,
end=self._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event_name == "start-ns":
def handler(prefix, uri, event=event_name, append=append):
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event_name == "end-ns":
def handler(prefix, event=event_name, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event_name)
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start(self, tag, attr_list):
# Handler for expat's StartElementHandler. Since ordered_attributes
# is set, the attributes are reported as a list of alternating
# attribute name,value.
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attr_list:
for i in range(0, len(attr_list), 2):
attrib[fixname(attr_list[i])] = attr_list[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
data_handler = self.target.data
except AttributeError:
return
try:
data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self.parser.ErrorLineNumber,
self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self.parser.ErrorLineNumber
err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
if pubid:
pubid = pubid[1:-1]
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype != self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
def doctype(self, name, pubid, system):
"""(Deprecated) Handle doctype declaration
*name* is the Doctype name, *pubid* is the public identifier,
and *system* is the system identifier.
"""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
def feed(self, data):
"""Feed encoded data to parser."""
try:
self.parser.Parse(data, 0)
except self._error as v:
self._raiseerror(v)
def close(self):
"""Finish feeding data to parser and return element structure."""
try:
self.parser.Parse("", 1) # end of data
except self._error as v:
self._raiseerror(v)
try:
close_handler = self.target.close
except AttributeError:
pass
else:
return close_handler()
finally:
# get rid of circular references
del self.parser, self._parser
del self.target, self._target
# Import the C accelerators
try:
# Element is going to be shadowed by the C implementation. We need to keep
# the Python version of it accessible for some "creative" uses by external code
# (see tests)
_Element_Py = Element
# Element, SubElement, ParseError, TreeBuilder, XMLParser
from _elementtree import *
except ImportError:
pass
| apache-2.0 |
edisonlz/fruit | web_project/base/site-packages/django_extensions/tests/test_compile_pyc.py | 38 | 2083 | import os
import six
import fnmatch
from django.test import TestCase
from django.core.management import call_command
from django_extensions.management.utils import get_project_root
class CompilePycTests(TestCase):
def setUp(self):
self._settings = os.environ.get('DJANGO_SETTINGS_MODULE')
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_extensions.settings'
def tearDown(self):
if self._settings:
os.environ['DJANGO_SETTINGS_MODULE'] = self._settings
def _find_pyc(self, path, mask='*.pyc'):
pyc_glob = []
for root, dirs, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, mask):
pyc_glob.append(os.path.join(root, filename))
return pyc_glob
def test_compiles_pyc_files(self):
with self.settings(BASE_DIR=get_project_root()):
call_command('clean_pyc')
pyc_glob = self._find_pyc(get_project_root())
self.assertEqual(len(pyc_glob), 0)
with self.settings(BASE_DIR=get_project_root()):
call_command('compile_pyc')
pyc_glob = self._find_pyc(get_project_root())
self.assertTrue(len(pyc_glob) > 0)
with self.settings(BASE_DIR=get_project_root()):
call_command('clean_pyc')
def test_takes_path(self):
out = six.StringIO()
project_root = os.path.join(get_project_root(), 'tests', 'testapp')
with self.settings(BASE_DIR=get_project_root()):
call_command('clean_pyc', path=project_root)
pyc_glob = self._find_pyc(project_root)
self.assertEqual(len(pyc_glob), 0)
with self.settings(BASE_DIR=get_project_root()):
call_command('compile_pyc', verbosity=2, path=project_root, stdout=out)
expected = ['Compiling %s...' % fn for fn in
sorted(self._find_pyc(project_root, mask='*.py'))]
output = out.getvalue().splitlines()
self.assertEqual(expected, sorted(output))
with self.settings(BASE_DIR=get_project_root()):
call_command('clean_pyc')
| apache-2.0 |
wakatime/wakatime | wakatime/packages/urllib3/connectionpool.py | 94 | 35358 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url
if six.PY2:
# Queue is imported for side effects on MS Windows
import Queue as _unused_module_Queue # noqa: F401
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = queue.LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _ipv6_host(host).lower()
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
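# Illustrative sketch (not part of the original module): typical use of the
# pool defined below through its RequestMethods interface. The host name is
# a placeholder and the helper is never called.
def _example_pool_usage():
    pool = HTTPConnectionPool('example.invalid', maxsize=2, block=True,
                              retries=Retry(3))
    response = pool.request('GET', '/')
    return response.status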
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug("Starting new HTTP connection (%d): %s",
self.num_connections, self.host)
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s",
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older, Python 3
try:
httplib_response = conn.getresponse()
except Exception as e:
# Remove the TypeError from the exception chain in Python 3;
# otherwise it looks like a programming error was the cause.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
method, url, http_version, httplib_response.status,
httplib_response.length)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
'Failed to parse headers (url=%s): %s',
self._absolute_url(url), hpe, exc_info=True)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
host = _ipv6_host(host).lower()
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, chunked=False,
body_pos=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/shazow/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers,
chunked=chunked)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw['request_method'] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (TimeoutError, HTTPException, SocketError, ProtocolError,
BaseSSLError, SSLError, CertificateError) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s", retries, err, url)
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, body_pos=body_pos,
**response_kw)
def drain_and_release_conn(response):
try:
# discard any remaining response body, the connection will be
# released back to the pool once the entire response is read
response.read()
except (TimeoutError, HTTPException, SocketError, ProtocolError,
BaseSSLError, SSLError) as e:
pass
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, body_pos=body_pos,
**response_kw)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader('Retry-After'))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn,
body_pos=body_pos, **response_kw)
return response
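# Illustrative usage sketch, not part of the original source. The host, retry
# policy and timeout values are hypothetical; they only show how urlopen()
# combines Retry, Timeout and release_conn.
#
#   pool = HTTPConnectionPool('example.org', maxsize=4)
#   retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503])
#   resp = pool.urlopen('GET', '/status', retries=retry,
#                       timeout=Timeout(connect=2.0, read=5.0),
#                       preload_content=False)
#   try:
#       data = resp.read()
#   finally:
#       resp.release_conn()   # return the connection to the pool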
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None, **conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
if ca_certs and cert_reqs is None:
cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self._proxy_host, self.port)
else:
set_tunnel(self._proxy_host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.debug("Starting new HTTPS connection (%d): %s",
self.num_connections, self.host)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
'#ssl-warnings'),
InsecureRequestWarning)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def _ipv6_host(host):
"""
Process IPv6 address literals
"""
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
#
# Also if an IPv6 address literal has a zone identifier, the
# percent sign might be URIencoded, convert it back into ASCII
if host.startswith('[') and host.endswith(']'):
host = host.replace('%25', '%').strip('[]')
return host
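# Small illustrative sketch, not part of the original module: how
# connection_from_url() dispatches on the scheme and how _ipv6_host()
# normalizes a bracketed, percent-encoded IPv6 literal. The URLs are
# hypothetical.
#
#   https_pool = connection_from_url('https://example.org:8443/', maxsize=2)
#   assert isinstance(https_pool, HTTPSConnectionPool)
#   assert _ipv6_host('[fe80::1%25eth0]') == 'fe80::1%eth0'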
| bsd-3-clause |
codasus/django-blogages | blogages/django/core/files/uploadhandler.py | 136 | 7193 | """
Base file upload handler classes, and the built-in concrete subclasses
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile
from django.utils import importlib
__all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler',
'load_handler', 'StopFutureHandlers']
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
        If ``connection_reset`` is ``True``, Django will halt the upload
        without consuming the rest of the request data. This will cause the
        browser to show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __unicode__(self):
if self.connection_reset:
return u'StopUpload: Halt current upload.'
else:
return u'StopUpload: Consume request data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
    Upload handlers that have handled a file and do not want future handlers to
    run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler(object):
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length,
charset=None, content_type_extra=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
if content_type_extra is None:
content_type_extra = {}
self.content_type_extra = content_type_extra
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError()
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError()
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
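# Illustrative sketch, not part of Django: a minimal custom handler built on
# FileUploadHandler that only counts uploaded bytes. The class name is
# hypothetical; receive_data_chunk() must return the chunk for later handlers
# to see it, or None to swallow it.
#
#   class ByteCountingUploadHandler(FileUploadHandler):
#       def new_file(self, *args, **kwargs):
#           super(ByteCountingUploadHandler, self).new_file(*args, **kwargs)
#           self.total = 0
#       def receive_data_chunk(self, raw_data, start):
#           self.total += len(raw_data)
#           return raw_data          # pass the chunk on to the next handler
#       def file_complete(self, file_size):
#           return None              # let another handler produce the file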
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def __init__(self, *args, **kwargs):
super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs)
def new_file(self, file_name, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset)
def receive_data_chunk(self, raw_data, start):
self.file.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
self.file.size = file_size
return self.file
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be in use.
"""
        # Check the content-length header to see if this handler should be used.
        # If the post is too large, we cannot use the memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
if self.activated:
self.file = StringIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Add the data to the StringIO file.
"""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.activated:
return
self.file.seek(0)
return InMemoryUploadedFile(
file = self.file,
field_name = self.field_name,
name = self.file_name,
content_type = self.content_type,
size = file_size,
charset = self.charset
)
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = importlib.import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e))
except ValueError, e:
raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr))
return cls(*args, **kwargs)
| mit |
Zephor5/scrapy | scrapy/spiders/__init__.py | 134 | 3606 | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.exceptions import ScrapyDeprecationWarning
class Spider(object_ref):
"""Base class for scrapy spiders. All spiders must inherit from this
class.
"""
name = None
custom_settings = None
def __init__(self, name=None, **kwargs):
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise ValueError("%s must have a name" % type(self).__name__)
self.__dict__.update(kwargs)
if not hasattr(self, 'start_urls'):
self.start_urls = []
@property
def logger(self):
logger = logging.getLogger(self.name)
return logging.LoggerAdapter(logger, {'spider': self})
def log(self, message, level=logging.DEBUG, **kw):
"""Log the given message at the given log level
This helper wraps a log call to the logger within the spider, but you
can use it directly (e.g. Spider.logger.info('msg')) or use any other
Python logger too.
"""
self.logger.log(level, message, **kw)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = cls(*args, **kwargs)
spider._set_crawler(crawler)
return spider
def set_crawler(self, crawler):
        warnings.warn("set_crawler is deprecated, instantiate and bind the "
                      "spider to this crawler with the from_crawler method "
                      "instead.",
                      category=ScrapyDeprecationWarning, stacklevel=2)
        assert not hasattr(self, 'crawler'), "Spider already bound to a " \
                                             "crawler"
self._set_crawler(crawler)
def _set_crawler(self, crawler):
self.crawler = crawler
self.settings = crawler.settings
crawler.signals.connect(self.close, signals.spider_closed)
def start_requests(self):
for url in self.start_urls:
yield self.make_requests_from_url(url)
def make_requests_from_url(self, url):
return Request(url, dont_filter=True)
def parse(self, response):
raise NotImplementedError
@classmethod
def update_settings(cls, settings):
settings.setdict(cls.custom_settings or {}, priority='spider')
@classmethod
def handles_request(cls, request):
return url_is_from_spider(request.url, cls)
@staticmethod
def close(spider, reason):
closed = getattr(spider, 'closed', None)
if callable(closed):
return closed(reason)
def __str__(self):
return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))
__repr__ = __str__
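# Illustrative sketch, not part of Scrapy itself: a minimal Spider subclass.
# The name, domain and selector are hypothetical; they only show the
# start_urls/parse contract defined above.
#
#   class QuotesSpider(Spider):
#       name = 'quotes'
#       start_urls = ['http://quotes.example.com/page/1/']
#
#       def parse(self, response):
#           for href in response.css('a::attr(href)').extract():
#               yield Request(response.urljoin(href))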
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
def __init__(self, message):
self.message = message
def __getattr__(self, name):
raise AttributeError(self.message)
spiders = ObsoleteClass(
'"from scrapy.spider import spiders" no longer works - use '
'"from scrapy.spiderloader import SpiderLoader" and instantiate '
'it with your project settings"'
)
# Top-level imports
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
from scrapy.spiders.sitemap import SitemapSpider
| bsd-3-clause |
euphorie/Euphorie | src/euphorie/client/tests/test_functional_country.py | 1 | 2524 | # coding=utf-8
from euphorie.client.tests.utils import addSurvey
from euphorie.client.tests.utils import registerUserInClient
from euphorie.content.tests.utils import BASIC_SURVEY
from euphorie.testing import EuphorieFunctionalTestCase
import urllib
class CountryFunctionalTests(EuphorieFunctionalTestCase):
def test_surveys_filtered_by_language(self):
survey = """<sector xmlns="http://xml.simplon.biz/euphorie/survey/1.0">
<title>Sector</title>
<survey>
<title>Survey</title>
<language>en</language>
</survey>
</sector>"""
survey_nl = """<sector xmlns="http://xml.simplon.biz/euphorie/survey/1.0">
<title>Branche</title>
<survey>
<title>Vragenlijst</title>
<language>nl</language>
</survey>
</sector>""" # noqa
self.loginAsPortalOwner()
addSurvey(self.portal, survey)
addSurvey(self.portal, survey_nl)
browser = self.get_browser()
# Pass the language as URL parameter to ensure that we get the NL
# version
browser.open("%s?language=nl" % self.portal.client.absolute_url())
registerUserInClient(browser, link="Registreer")
# Note, this used to test that the URL was that of the client,
# in the correct country (nl), with `?language=nl-NL` appended.
# I don't see where in the code this language URL parameter would
# come from, so I remove it in this test as well.
self.assertEqual(browser.url, "http://nohost/plone/client/nl")
self.assertEqual(
browser.getControl(name="survey").options, ["branche/vragenlijst"]
)
browser.open("%s?language=en" % self.portal.client["nl"].absolute_url())
self.assertEqual(browser.getControl(name="survey").options, ["sector/survey"])
def test_must_select_valid_survey(self):
self.loginAsPortalOwner()
addSurvey(self.portal, BASIC_SURVEY)
browser = self.get_browser()
browser.open(self.portal.client["nl"].absolute_url())
registerUserInClient(browser)
data = urllib.urlencode(
{"action": "new", "survey": "", "title:utf8:ustring": "Foo"}
)
browser.handleErrors = False
browser.open(browser.url, data)
self.assertEqual(browser.url, "http://nohost/plone/client/nl")
| gpl-2.0 |
dan-bowerman/PAGER | PAGER_Scripts/publishService.py | 1 | 21916 | # -*- coding: UTF-8 -*-
"""Functions to aid in publishing a service in the publication workflow.
"""
import glob
import json
import os
import random
import string
import urllib2
import xml.dom.minidom as DOM
from xml.sax.saxutils import escape
import arcpy
import checkError
def createMXD(inFolder, template,logs):
"""Create MXD from the layers in the folder.
Args:
inFolder: Path of folder to work from.
template: Template MXD file.
logs: log list holds all log items for current publication
Returns:
A new MXD file.
"""
checkError.printLog(logs,"Creating MXD...")
arcpy.env.workspace = inFolder
#Open the template
mxd = arcpy.mapping.MapDocument(template)
#Save the template to a new MXD, specific for this data
mxd.saveACopy(inFolder + "\\" + "publishMXD.mxd")
#Reopen the new file
mxd = None
mxd = arcpy.mapping.MapDocument(inFolder + "\\" + "publishMXD.mxd")
#Add layer
#http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#/AddLayer/00s300000025000000/
#http://gis.stackexchange.com/questions/4882/how-do-i-add-a-shapefile-in-arcgis-via-python
shps = arcpy.ListFeatureClasses("*.shp", "")
dataFrame = arcpy.mapping.ListDataFrames(mxd, "*")[0]
if shps:
for shp in shps:
newLayer = arcpy.mapping.Layer(inFolder + "\\" + shp)
arcpy.mapping.AddLayer(dataFrame, newLayer, "BOTTOM")
applyRandomSymbology(mxd, dataFrame, 0,logs)
del newLayer
mxd.save()
checkError.printLog(logs,"Publishing MXD created")
else: #If there's no shapefile
checkError.printLog(logs,"No shapefile. Check payload folder")
return mxd
def applyRandomSymbology(mxd, dataFrame, layerIndex,logs):
"""Change the specified layer's symbology to a random colour.
Args:
mxd: MXD file.
dataFrame: DataFrame object of the MXD file.
layerIndex: Index value of layer.
logs: log list holds all log items for current publication
"""
#Layer you want to update
updateLayer = arcpy.mapping.ListLayers(mxd, "", dataFrame)[layerIndex]
#Grab the properties of the layer
desc = arcpy.Describe(updateLayer)
groupLayerFile = None
if desc.shapeType == 'Point' or desc.shapeType == 'Polygon' or desc.shapeType == 'Polyline':
groupLayerFile = arcpy.mapping.Layer(r"%s\%sColours.lyr" % (os.path.dirname(__file__), desc.shapeType))
else:
return
groupLayerList = arcpy.mapping.ListLayers(groupLayerFile)
groupLayersCount = len(groupLayerList)
#Start with 1 because the first layer of groupLayerList is a group layer
randomNumber = random.randint(1, groupLayersCount - 1)
#Select random layer you want to apply to updateLayer
sourceLayer = groupLayerList[randomNumber]
arcpy.mapping.UpdateLayer(dataFrame, updateLayer, sourceLayer, True)
#Use the ArcGIS for Server REST API to get the list of map services that are
#already published
def getCatalog(server, port,logs):
"""Use ArcGIS for Server REST API to get the list of map service that are already published.
Args:
server: Domain of server to connect to.
port: Port of server to connect to.
logs: log list holds all log items for current publication
Returns:
List of map services.
"""
serviceList = []
baseUrl = "http://{}:{}/arcgis/rest/services".format(server, port)
catalog = json.load(urllib2.urlopen(baseUrl + "/" + "?f=json"))
if "error" in catalog:
return
services = catalog['services']
for service in services:
response = json.load(urllib2.urlopen(baseUrl + '/' + service['name'] + '/' + service['type'] + "?f=json"))
serviceList.append(service['name'])
folders = catalog['folders']
for folderName in folders:
catalog = json.load(urllib2.urlopen(baseUrl + "/" + folderName + "?f=json"))
if "error" in catalog:
return
services = catalog['services']
for service in services:
response = json.load(urllib2.urlopen(baseUrl + '/' + service['name'] + '/' + service['type'] + "?f=json"))
serviceList.append(service['name'])
return serviceList
def serviceStatus(serviceFullName,smallKey, smallKeyFolder, server, port,geocatUrl, geocatUsername, geocatPassword,logs):
"""Check the status of a pubilshed service.
Args:
smallKey: Small key of current payload.
SmallKeyFolder: folder of current payload
serviceFullName: Name of the service.
server: Domain of server to connect to.
port: Port of server to connect to.
geocatUrl: geocat Url
geocatUsername: geocat user name
geocatPassword: geocat password
logs: log list holds all log items for current publication
Returns:
A string - 'ERROR' or 'SUCCESS'.
"""
status = 'SUCCESS'
baseUrl = "http://{}:{}/arcgis/rest/services".format(server, port)
response = json.load(urllib2.urlopen(baseUrl + '/' + serviceFullName + '/' + 'MapServer' + "?f=json"))
if "error" in response:
status = 'ERROR'
else:
        #check further if there are any records returned
queryUrl = baseUrl + '/' + serviceFullName + '/' + 'MapServer'
queryUrl= queryUrl + "/0/query?where=1%3D1&text=&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=&returnGeometry=true&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=true&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&f=json"
response= json.load(urllib2.urlopen(queryUrl))
if "error" in response:
status ="ERROR"
checkError.printLog(logs,"Service " + smallKey + " returns error.")
onlineResources.updateErrorStatus(smallKey, smallKeyFolder, RESTENDPOINTNOTFOUND['code'], geocatUrl, geocatUsername, geocatPassword)
return status
#July 8, 2014 - Not working
def makeDescriptor(smallkey, baseUrl,logs):
"""Use the makeDescriptor service to create a JSON descriptor file.
Assumption: The JSON file exists in a folder.
Args:
smallKey: Small key of current payload.
baseUrl: Base URL of makeDescriptor service.
logs: log list holds all log items for current publication
"""
makeDescriptorUrl = baseUrl + '/' + smallkey
print "make descriptorUrl:"+ makeDescriptorUrl
response = json.load(urllib2.urlopen(makeDescriptorUrl + "?f=json"))
if 'Error' in response:
checkError.printLog(logs,response['Error'])
else:
checkError.printLog(logs,response['msg'])
def getFrenchText(prop):
"""Gets the French text for the given property and returns a string.
Must be parent node that contains "gmd:LocalisedCharacterString" as a direct child.
Args:
prop: Nodelist object to retrieve text from.
Returns:
String of French text (or empty if none exists).
"""
try:
localisedString = prop.item(0).getElementsByTagName("gmd:LocalisedCharacterString")
if localisedString.length > 0 and localisedString.item(0).hasChildNodes():
return localisedString.item(0).firstChild.data
else:
return ""
except:
return ""
def getEnglishText(prop):
"""Gets the English text for the given property and returns a string.
Must be parent node that contains "gco:CharacterString" as a direct child.
Args:
prop: Nodelist object to retrieve text from.
Returns:
String of English text (or empty if none exists).
"""
try:
characterString = prop.item(0).getElementsByTagName("gco:CharacterString")
if characterString.length > 0 and characterString.item(0).hasChildNodes():
return characterString.item(0).firstChild.data
else:
return ""
except:
return ""
def joinStrings(strings, separator=" / "):
    """Joins strings divided by a separator string and returns the result.
    Filters out empty strings.
    Args:
        strings: Tuple of strings (i.e. (englishText, frenchText)).
separator (optional): Separator string (default = " / ").
Returns:
The joined string.
"""
return separator.join(filter(None, strings))
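# Small illustrative check, not part of the original script: joinStrings()
# drops empty halves, so a record with only an English value still renders
# cleanly. The values are hypothetical.
#
#   joinStrings(("Air Quality", "Qualité de l'air"))  ->  "Air Quality / Qualité de l'air"
#   joinStrings(("Air Quality", ""))                  ->  "Air Quality"
#   joinStrings(("water", "eau"), ", ")               ->  "water, eau"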
def setServiceProperties(prop, doc, propList):
"""Sets WMS/WFS service properties using propList dictionary values.
Args:
prop: DOM element node/property to be altered.
doc: DOM Document instance.
propList: Dictionary of WMS/WFS properties.
"""
if prop.firstChild.data in propList:
propValue = propList.get(prop.firstChild.data)
if prop.nextSibling.hasChildNodes():
prop.nextSibling.firstChild.replaceWholeText(propValue)
else:
txt = doc.createTextNode(propValue)
prop.nextSibling.appendChild(txt)
def escapeSpecialCharacters(propList):
"""Substitutes special characters in dictionary with an escape sequence and returns a dictionary.
See: http://resources.arcgis.com/en/help/main/10.2/index.html#//00sq00000082000000
Args:
propList: Dictionary of WMS/WFS properties to be parsed.
Returns:
Dictionary with substituted escape sequences.
"""
chars = {"\"": """,
"'": "'"}
for k, v in propList.items():
#Uses xml.sax.saxutils.escape with custom entities for single/double
#quotes
propList[k] = escape(v, chars)
return propList
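# Illustrative check, not part of the original script: escapeSpecialCharacters()
# rewrites quotes as the entities ArcGIS Server expects, on top of the default
# &, < and > escaping. The value is hypothetical.
#
#   escapeSpecialCharacters({u"title": u'He said "l\'eau"'})
#   ->  {u"title": u"He said &quot;l&apos;eau&quot;"}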
def getFirstElement(nodeList, tagName):
"""Gets the first child element of a node list specified by a tag name and returns a node list object.
Args:
nodeList: Node list object to be searched.
tagName: Element name to search for.
Returns:
A NodeList object.
"""
return nodeList.item(0).getElementsByTagName(tagName)
def getMetadata(workspace, smallKey):
"""Gets the metadata records (Eng/Fr) from supplied XML and returns a dictionary.
Args:
workspace: Absolute path of workspace folder.
smallKey: Small key of current payload.
Returns:
A dictionary filled with metadata records.
"""
#WMS/WFS combined property list with default values
propList = {u"title": u"",
u"abstract": u"",
u"keyword": u"",
u"contactPerson": u"Inquiry Centre / Informathèque",
u"individualName": u"Inquiry Centre / Informathèque",
u"contactPosition": u"",
u"positionName": u"",
u"contactOrganization": u"Environment Canada / Environnement Canada",
u"providerName": u"Environment Canada / Environnement Canada",
u"address": u"10 Wellington, 23rd Floor / 10, rue Wellington, 23e étage",
u"deliveryPoint": u"10 Wellington, 23rd Floor / 10, rue Wellington, 23e étage",
u"addressType": u"",
u"city": u"Gatineau",
u"stateOrProvince": u"QC",
u"administrativeArea": u"QC",
u"postCode": u"K1A0H3",
u"postalCode": u"K1A0H3",
u"country": u"Canada",
u"contactVoiceTelephone": u"800-668-6767",
u"phone": u"800-668-6767",
u"contactFacsimileTelephone": u"819-994-1412",
u"facsimile": u"819-994-1412",
u"contactElectronicMailAddress": u"[email protected]",
u"electronicMailAddress": u"[email protected]",
u"fees": u"None / Aucun",
u"accessConstraints": u""}
metadataXML = os.path.abspath(os.path.join(workspace, "..", smallKey + ".xml"))
doc = DOM.parse(metadataXML)
identificationInfoNode = doc.getElementsByTagName("gmd:identificationInfo")
#Drill down to title node
citationNode = getFirstElement(identificationInfoNode, "gmd:citation")
titleNode = getFirstElement(citationNode, "gmd:title")
propList["title"] = joinStrings((getEnglishText(titleNode), getFrenchText(titleNode)))
#Drill down to abstract node
abstractNode = getFirstElement(identificationInfoNode, "gmd:abstract")
propList["abstract"] = joinStrings((getEnglishText(abstractNode), getFrenchText(abstractNode)))
#Drill down to position node
pointOfContactNode = getFirstElement(identificationInfoNode, "gmd:pointOfContact")
positionNameNode = getFirstElement(pointOfContactNode, "gmd:positionName")
propList["contactPosition"] = joinStrings((getEnglishText(positionNameNode), getFrenchText(positionNameNode)))
propList["positionName"] = propList["contactPosition"]
#Drill down to first keyword node
descriptiveKeywordsNode = getFirstElement(identificationInfoNode, "gmd:descriptiveKeywords")
keywordNode = getFirstElement(descriptiveKeywordsNode, "gmd:keyword")
propList["keyword"] = joinStrings((getEnglishText(keywordNode), getFrenchText(keywordNode)), ", ")
#Drill down to constraints node
resourceConstraintsNode = getFirstElement(identificationInfoNode, "gmd:resourceConstraints")
otherConstraintsNode = getFirstElement(resourceConstraintsNode, "gmd:otherConstraints")
propList["accessConstraints"] = joinStrings((getEnglishText(otherConstraintsNode), getFrenchText(otherConstraintsNode)))
return propList
def enableCapabilities(soeType, sddraft, smallKey, workspace,logs):
"""Enable capabilities for the service and set maxRecordCount.
Args:
soeType: List of capabilities.
sddraft: Path to Service Definition Draft file.
smallKey: Small key of current payload.
workspace: Absolute path of workspace folder.
logs: log list holds all log items for current publication
Returns:
Path to output .sddraft file.
"""
#Properties dictionary for WMS/WFS Service
propList = getMetadata(workspace, smallKey)
propList = escapeSpecialCharacters(propList)
#New maxRecordCount to set for publishing services (default: 1000)
maxRecordCount = 10000
#New maxInstances to set for publishing services (default: 2)
maxInstances = 1
#Read the sddraft xml.
doc = DOM.parse(sddraft)
#Find all elements named TypeName. This is where the server object
#extension (SOE) names are defined.
typeNames = doc.getElementsByTagName('TypeName')
for typeName in typeNames:
#Get the TypeName whose properties we want to modify.
if typeName.firstChild.data in soeType:
extension = typeName.parentNode
for extElement in extension.childNodes:
#Enabled SOE.
if extElement.tagName == 'Enabled':
extElement.firstChild.data = 'true'
#Set WMS/WFS service properties
if typeName.firstChild.data == "WMSServer" or typeName.firstChild.data == "WFSServer":
svcExtension = typeName.parentNode
for extElement in svcExtension.childNodes:
if extElement.tagName == "Props":
for propArray in extElement.childNodes:
for propSetProperty in propArray.childNodes:
for prop in propSetProperty.childNodes:
if prop.nodeType == 1 and prop.tagName == "Key":
setServiceProperties(prop, doc, propList)
#Set maxRecordCount for MapServer services
elif typeName.firstChild.data == "MapServer":
svcConfiguration = typeName.parentNode
for svcConfigElement in svcConfiguration.childNodes:
if svcConfigElement.tagName == "Definition":
for definitionElement in svcConfigElement.childNodes:
if definitionElement.tagName == "ConfigurationProperties":
for propArray in definitionElement.childNodes:
for propSet in propArray.childNodes:
for prop in propSet.childNodes:
if prop.tagName == "Key":
if prop.firstChild.data == "maxRecordCount":
prop.nextSibling.firstChild.data = maxRecordCount
print "maxRecordCount set to: %s" % str(maxRecordCount)
if definitionElement.tagName == "Props":
for propArray in definitionElement.childNodes:
for propSet in propArray.childNodes:
for prop in propSet.childNodes:
if prop.tagName == "Key":
if prop.firstChild.data == "MaxInstances":
prop.nextSibling.firstChild.data = maxInstances
print "maxInstances set to: %s" % str(maxInstances)
print "WMS/WFS service properties set"
#Output to a new sddraft
outXML = os.path.join(workspace, "ogcEnabledSDDraft.sddraft")
if os.path.exists(outXML):
os.remove(outXML)
f = open(outXML, 'w')
f.write(doc.toxml(encoding="utf-8"))
f.close()
checkError.printLog(logs,"Service definition created with %s enabled" % ", ".join(map(str, soeType)))
checkError.printLog(logs,"")
del f, doc
return outXML
def addFileSizeToJson(smallKey, smallKeyFolder, shpFolder):
"""Add the file size in bytes of the .shp file to the JSON descriptor.
Args:
smallKey: Small key of current payload.
smallKeyFolder: Folder in which data is unzipped to.
shpFolder: Folder containing the .shp file.
"""
os.chdir(shpFolder)
for file in glob.glob("*.shp"):
shpFileName = file
shpFilePath = os.path.join(shpFolder, shpFileName)
sizeInBytes = os.path.getsize(shpFilePath)
jsonPath = os.path.join(smallKeyFolder, smallKey) + '.json'
with open(jsonPath) as f:
data = json.load(f)
data["config"]["File_Size"] = sizeInBytes
with open(jsonPath, "w") as f:
json.dump(data, f)
def publishMXD(inFolder, mxd, connPath, serviceName, folder, logs, summary=None, tags=None):
"""Publish the service.
Args:
inFolder: Absolute path of workspace folder.
mxd: MXD file to publish.
connPath: Path to connection file that is used to connect to a GIS Server.
serviceName: Name of the service.
folder: Name of the folder to publish in.
logs: log list holds all log items for current publication
summary (optional): A string that represents the Item Description Summary (default=None).
tags (optional): A string that represents the Item Description Tags (default=None).
"""
workspace = inFolder
checkError.printLog(logs,"Publishing MXD in: " + workspace)
# Provide other service details
service = serviceName
sddraft = workspace + "/" + service + '.sddraft'
sd = workspace + "/" + service + '.sd'
folderName = folder
# make sure the folder is registered with the server, if not, add it to the
# datastore
#if workspace not in [i[2] for i in arcpy.ListDataStoreItems(connPath, 'FOLDER')]:
# # both the client and server paths are the same
# dsStatus = arcpy.AddDataStoreItem(connPath, "FOLDER", "Workspace for " + service, workspace, workspace)
# print "Data store: " + str(dsStatus)
# Create service definition draft
# Data will be copied to server
# Syntax: CreateMapSDDraft(map_document, out_sddraft, service_name,
# {server_type}, {connection_file_path}, {copy_data_to_server},
# {folder_name}, {summary}, {tags})
arcpy.mapping.CreateMapSDDraft(mxd, sddraft, service, 'ARCGIS_SERVER', connPath, True, folderName, summary, tags)
#Modify the sd to enable wms, wfs, and then wcs capabilities on the service
soeType = ['WMSServer', 'WFSServer', 'GeoJSONServer']
ogcSDDraft = enableCapabilities(soeType, sddraft, service, workspace,logs)
# Analyze the service definition draft
analysis = arcpy.mapping.AnalyzeForSD(ogcSDDraft)
# Print errors, warnings, and messages returned from the analysis
checkError.printLog(logs,"The following information was returned during analysis of the MXD:")
for key in ('messages', 'warnings', 'errors'):
checkError.printLog(logs,'----' + key.upper() + '---')
vars = analysis[key]
errorList =""
if not vars:
checkError.printLog(logs,' None')
else:
for ((message, code), layerlist) in vars.iteritems():
                errorList = '    ' + message + ' CODE %i' % code
                errorList = errorList + ' applies to:'
                for layer in layerlist:
                    errorList = errorList + ' ' + layer.name
checkError.printLog(logs,errorList)
# Stage and upload the service if the sddraft analysis did not contain
# errors
if analysis['errors'] == {}:
# Execute StageService. This creates the service definition.
arcpy.StageService_server(ogcSDDraft, sd)
# Execute UploadServiceDefinition. This uploads the service definition
# and publishes the service.
arcpy.UploadServiceDefinition_server(sd, connPath)
checkError.printLog(logs, "Service successfully published")
del ogcSDDraft
else:
checkError.printLog(logs,analysis['errors'])
checkError.printLog(logs,"Service could not be published because errors were found during analysis.")
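# Illustrative end-to-end sketch, not part of the original script: wiring
# createMXD() and publishMXD() together. All paths, the service/folder names
# and the ArcGIS Server connection (.ags) file are hypothetical.
#
#   logs = []
#   payload = r"C:\PAGER\payloads\ab123456"
#   mxd = createMXD(payload, r"C:\PAGER\templates\template.mxd", logs)
#   publishMXD(payload, mxd, r"C:\PAGER\connections\arcgis_on_server.ags",
#              "ab123456", "EcoGeo", logs,
#              summary="Published by PAGER", tags="PAGER, EC")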
| mit |
tsnoam/python-telegram-bot | tests/test_update.py | 1 | 2618 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents Tests for Telegram Update"""
import os
import unittest
import sys
sys.path.append('.')
import telegram
from tests.base import BaseTest
class UpdateTest(BaseTest, unittest.TestCase):
"""This object represents Tests for Telegram Update."""
def setUp(self):
self.update_id = 868573637
self.message = {'message_id': 319,
'from': {'id': 12173560,
'first_name': "Leandro",
'last_name': "S.",
'username': "leandrotoledo"},
'chat': {'id': 12173560,
'type': 'private',
'first_name': "Leandro",
'last_name': "S.",
'username': "leandrotoledo"},
'date': 1441644592,
'text': "Update Test"}
self.json_dict = {
'update_id': self.update_id,
'message': self.message
}
def test_update_de_json(self):
update = telegram.Update.de_json(self.json_dict)
self.assertEqual(update.update_id, self.update_id)
self.assertTrue(isinstance(update.message, telegram.Message))
def test_update_to_json(self):
update = telegram.Update.de_json(self.json_dict)
self.assertTrue(self.is_json(update.to_json()))
def test_update_to_dict(self):
update = telegram.Update.de_json(self.json_dict)
self.assertTrue(self.is_dict(update.to_dict()))
self.assertEqual(update['update_id'], self.update_id)
self.assertTrue(isinstance(update['message'], telegram.Message))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
jjgomera/pychemqt | lib/mEoS/MM.py | 1 | 7485 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
from unittest import TestCase
from lib import unidades
from lib.meos import MEoS
class MM(MEoS):
"""Multiparameter equation of state for hexamethyldisiloxane"""
name = "hexamethyldisiloxane"
CASNumber = "107-46-0"
formula = "C6H18OSi2"
synonym = "MM"
_refPropName = "MM"
_coolPropName = "MM"
rhoc = unidades.Density(304.4043888253152)
Tc = unidades.Temperature(518.69997204)
Pc = unidades.Pressure(1939.39, "kPa")
M = 162.3768 # g/mol
Tt = unidades.Temperature(204.93)
Tb = unidades.Temperature(373.401)
f_acent = 0.418
momentoDipolar = unidades.DipoleMoment(0.801, "Debye")
id = 1376
Fi1 = {"ao_log": [1, 3],
"pow": [0, 1],
"ao_pow": [72.110754, -10.431499],
"ao_exp": [18.59, 29.58, 19.74, 4.87],
"titao": [20/Tc, 1400/Tc, 3600/Tc, 6300/Tc]}
f = 8.314472
CP1 = {"ao": 51.894/f,
"an": [741.34e-3/f, -416e-6/f, 70e-9/f],
"pow": [1, 2, 3]}
thol = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for hexamethyldisiloxane of "
"Thol (2015).",
"__doi__": {"autor": "Thol, M., Dubberke, F.H., Rutkai, G., Windmann, "
"T., Köster, A., Span, R., Vrabec, J.",
"title": "Fundamental equation of state correlation for "
"hexamethyldisiloxane based on experimental and "
"molecular simulation data",
"ref": "Fluid Phase Equilibria 418 (2016) 133-151",
"doi": "10.1016/j.fluid.2015.09.047"},
"R": 8.3144621,
"cp": Fi1,
"ref": "OTO",
"Tmin": Tt, "Tmax": 1200.0, "Pmax": 600000.0, "rhomax": 5.266,
"M": 162.3768, "Tc": 518.7, "rhoc": 1.653, "Pc": 1931.1,
"nr1": [0.5063651e-1, 8.604724, -9.179684, -1.146325, 0.4878559],
"d1": [4, 1, 1, 2, 3],
"t1": [1, 0.346, 0.46, 1.01, 0.59],
"nr2": [-2.434088, -1.621326, 0.6239872, -2.306057, -0.5555096e-1],
"d2": [1, 3, 2, 2, 7],
"t2": [2.600, 3.330, 0.750, 2.950, 0.930],
"c2": [2, 2, 1, 2, 1],
"gamma2": [1]*5,
"nr3": [9.385015, -2.493508, -3.308032, -0.1885803, -0.9883865e-1,
0.1111090, 0.1061928, -0.1452454e-1],
"d3": [1, 1, 3, 3, 1, 2, 3, 1],
"t3": [1.33, 1.68, 1.7, 3.08, 5.41, 1.4, 1.1, 5.3],
"alfa3": [1.0334, 1.544, 1.113, 1.113, 1.11, 7.2, 1.45, 4.73],
"beta3": [0.4707, 0.32, 0.404, 0.517, 0.432, 7.2, 1.2, 35.8],
"gamma3": [1.7754, 0.692, 1.242, 0.421, 0.406, 0.163, 0.795, 0.88],
"epsilon3": [0.8927, 0.5957, 0.559, 1.056, 1.3, 0.106, 0.181, 0.525],
"nr4": []}
colonna = {
"__type__": "Helmholtz",
"__name__": "Helmholtz equation of state for hexamethyldisiloxane of "
"Colonna (2006).",
"__doi__": {"autor": "Colonna, P., Nannan, N.R., Guardone, A., "
"Lemmon, E.W.",
"title": "Multiparameter Equations of State for Selected "
"Siloxanes",
"ref": "Fluid Phase Equilibria, 244:193-211, 2006.",
"doi": "10.1016/j.fluid.2006.04.015"},
"R": 8.314472,
"cp": CP1,
"ref": "NBP",
"Tmin": 273.0, "Tmax": 673.0, "Pmax": 30000.0, "rhomax": 5.21,
"nr1": [1.01686012, -2.19713029, 0.75443188, -0.68003426, 0.19082162,
0.10530133e-2],
"d1": [1, 1, 1, 2, 3, 7],
"t1": [0.25, 1.125, 1.5, 1.375, 0.25, 0.875],
"nr2": [0.6284595, 0.30903042e-1, -0.83948727, -0.20262381,
-0.35131597e-1, 0.25902341e-1],
"d2": [2, 5, 1, 4, 3, 4],
"t2": [0.625, 1.75, 3.625, 3.625, 14.5, 12.0],
"c2": [1, 1, 2, 2, 3, 3],
"gamma2": [1]*6}
eq = thol, colonna
_vapor_Pressure = {
"eq": 3,
"n": [-0.850230e1, 0.380300e1, -0.341500e1, -0.467900e1, -0.310600e1],
"t": [1.0, 1.5, 1.98, 3.86, 14.6]}
_liquid_Density = {
"eq": 1,
"n": [0.4003e1, -0.6406e1, 0.115e2, -0.1004e2, 0.4e1],
"t": [0.436, 0.827, 1.24, 1.7, 2.23]}
_vapor_Density = {
"eq": 2,
"n": [-0.37421e1, -0.37087e2, 0.7546e2, -0.7167e2, -68.69, -178.4],
"t": [0.428, 1.79, 2.28, 2.8, 7, 15.4]}
class Test(TestCase):
def test_thol(self):
# Test in thesis
# Thol, M.
# Empirical Multiparameter Equations of State Based on Molecular
# Simulation and Hybrid Data Sets
# PhD thesis, Ruhr-Universität Bochum, 2015.
# Appendix A, Pag 259
# The two first point are inverted in table
st = MM(T=250, rhom=0.0001, eq="thol")
self.assertEqual(round(st.P.MPa, 11), 2.0772979e-4)
self.assertEqual(round(st.cpM.JmolK, 5), 216.58261)
self.assertEqual(round(st.w, 5), 115.31572)
self.assertEqual(round(st.hM.Jmol, 4), 1715.1951)
self.assertEqual(round(st.sM.JmolK, 6), 38.943461)
self.assertEqual(round(st.aM.Jmol, 3), -10097.968)
st = MM(T=250, rhom=5, eq="thol")
self.assertEqual(round(st.P.MPa, 7), 2.3550378)
self.assertEqual(round(st.cpM.JmolK, 5), 290.08361)
self.assertEqual(round(st.w, 4), 1068.3855)
self.assertEqual(round(st.hM.Jmol, 3), -38660.057)
self.assertEqual(round(st.sM.JmolK, 5), -126.50074)
self.assertEqual(round(st.aM.Jmol, 4), -7505.8794)
st = MM(T=400, rhom=0.05, eq="thol")
self.assertEqual(round(st.P.MPa, 8), 0.15367468)
self.assertEqual(round(st.cpM.JmolK, 5), 293.72933)
self.assertEqual(round(st.w, 5), 134.70433)
self.assertEqual(round(st.hM.Jmol, 3), 38493.817)
self.assertEqual(round(st.sM.JmolK, 6), 99.143187)
self.assertEqual(round(st.aM.Jmol, 4), -4236.9519)
st = MM(T=400, rhom=4.5, eq="thol")
self.assertEqual(round(st.P.MPa, 6), 40.937214)
self.assertEqual(round(st.cpM.JmolK, 5), 339.40133)
self.assertEqual(round(st.w, 5), 930.21218)
self.assertEqual(round(st.hM.Jmol, 3), 13672.106)
self.assertEqual(round(st.sM.JmolK, 6), 11.063873)
self.assertEqual(round(st.aM.Jmol, 5), 149.39757)
st = MM(T=560, rhom=4.5, eq="thol")
self.assertEqual(round(st.P.MPa, 5), 123.02530)
self.assertEqual(round(st.cpM.JmolK, 5), 387.27687)
self.assertEqual(round(st.w, 4), 1132.8991)
self.assertEqual(round(st.hM.Jmol, 3), 83661.457)
self.assertEqual(round(st.sM.JmolK, 5), 119.31484)
self.assertEqual(round(st.aM.Jmol, 3), -10493.807)
| gpl-3.0 |
40223219/2015_midterm | static/Brython3.1.1-20150328-091302/Lib/heapq.py | 628 | 18065 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
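# A short sketch of how the two selection helpers above are typically used;
# both avoid sorting the entire input when n is small compared to its size.
#
# >>> nlargest(3, [5, 1, 8, 3, 9, 2])
# [9, 8, 5]
# >>> nsmallest(2, [5, 1, 8, 3, 9, 2])
# [1, 2]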
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but it turns out that's not a good idea, even though
# many books write the algorithm that way.  During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so lets just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
| gpl-3.0 |
alexlib/Qt-Python-Binding-Examples | windows/custom_win_flags.py | 1 | 1096 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
demo template
Tested environment:
Mac OS X 10.6.8
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class SheetWin(QtGui.QWidget):
def __init__(self, parent = None):
super(SheetWin, self).__init__(parent)
self.setWindowFlags(QtCore.Qt.Sheet)
btn = QtGui.QPushButton("close", self)
btn.move(10, 10)
btn.clicked.connect(self.close)
class Demo(QtGui.QWidget):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
btn = QtGui.QPushButton("btn", self)
btn.clicked.connect(self.btn_cb)
def btn_cb(self):
sw_obj = SheetWin(self)
sw_obj.show()
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_()) | bsd-3-clause |
ContinuumIO/dask | dask/cache.py | 2 | 2000 | from .callbacks import Callback
from timeit import default_timer
from numbers import Number
import sys
overhead = sys.getsizeof(1.23) * 4 + sys.getsizeof(()) * 4
class Cache(Callback):
""" Use cache for computation
Examples
--------
>>> cache = Cache(1e9) # doctest: +SKIP
The cache can be used locally as a context manager around ``compute`` or
``get`` calls:
>>> with cache: # doctest: +SKIP
... result = x.compute()
You can also register a cache globally, so that it works for all
computations:
>>> cache.register() # doctest: +SKIP
>>> cache.unregister() # doctest: +SKIP
"""
def __init__(self, cache, *args, **kwargs):
try:
import cachey
except ImportError as ex:
raise ImportError(
'Cache requires cachey, "{ex}" problem ' "importing".format(ex=str(ex))
) from ex
self._nbytes = cachey.nbytes
if isinstance(cache, Number):
cache = cachey.Cache(cache, *args, **kwargs)
else:
assert not args and not kwargs
self.cache = cache
self.starttimes = dict()
def _start(self, dsk):
self.durations = dict()
overlap = set(dsk) & set(self.cache.data)
for key in overlap:
dsk[key] = self.cache.data[key]
def _pretask(self, key, dsk, state):
self.starttimes[key] = default_timer()
def _posttask(self, key, value, dsk, state, id):
duration = default_timer() - self.starttimes[key]
deps = state["dependencies"][key]
if deps:
duration += max(self.durations.get(k, 0) for k in deps)
self.durations[key] = duration
nb = self._nbytes(value) + overhead + sys.getsizeof(key) * 4
self.cache.put(key, value, cost=duration / nb / 1e9, nbytes=nb)
def _finish(self, dsk, state, errored):
self.starttimes.clear()
self.durations.clear()
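    # A minimal sketch of the non-numeric constructor branch above: passing a
    # pre-built cachey.Cache (assuming cachey is installed) instead of a byte
    # limit, then registering it globally.
    #
    # >>> import cachey                      # doctest: +SKIP
    # >>> cache = Cache(cachey.Cache(2e9))   # doctest: +SKIP
    # >>> cache.register()                   # doctest: +SKIP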
| bsd-3-clause |
HybridF5/tempest | tempest/api/volume/test_volumes_negative.py | 5 | 13622 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest.lib import exceptions as lib_exc
from tempest import test
class VolumesV2NegativeTest(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(VolumesV2NegativeTest, cls).setup_clients()
cls.client = cls.volumes_client
@classmethod
def resource_setup(cls):
super(VolumesV2NegativeTest, cls).resource_setup()
cls.name_field = cls.special_fields['name_field']
# Create a shared test instance and volume for the attach/detach tests
cls.volume = cls.create_volume()
cls.mountpoint = "/dev/vdc"
@test.attr(type=['negative'])
@test.idempotent_id('f131c586-9448-44a4-a8b0-54ca838aa43e')
def test_volume_get_nonexistent_volume_id(self):
# Should not be able to get a non-existent volume
self.assertRaises(lib_exc.NotFound, self.client.show_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('555efa6e-efcd-44ef-8a3b-4a7ca4837a29')
def test_volume_delete_nonexistent_volume_id(self):
# Should not be able to delete a non-existent Volume
self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('1ed83a8a-682d-4dfb-a30e-ee63ffd6c049')
def test_create_volume_with_invalid_size(self):
# Should not be able to create volume with invalid size
# in request
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('9387686f-334f-4d31-a439-33494b9e2683')
def test_create_volume_with_out_passing_size(self):
# Should not be able to create volume without passing size
# in request
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('41331caa-eaf4-4001-869d-bc18c1869360')
def test_create_volume_with_size_zero(self):
# Should not be able to create volume with size zero
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('8b472729-9eba-446e-a83b-916bdb34bef7')
def test_create_volume_with_size_negative(self):
# Should not be able to create volume with size negative
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.BadRequest, self.client.create_volume,
size='-1', display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('10254ed8-3849-454e-862e-3ab8e6aa01d2')
def test_create_volume_with_nonexistent_volume_type(self):
# Should not be able to create volume with non-existent volume type
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.create_volume,
size='1', volume_type=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('0c36f6ae-4604-4017-b0a9-34fdc63096f9')
def test_create_volume_with_nonexistent_snapshot_id(self):
# Should not be able to create volume with non-existent snapshot
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.create_volume,
size='1', snapshot_id=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('47c73e08-4be8-45bb-bfdf-0c4e79b88344')
def test_create_volume_with_nonexistent_source_volid(self):
# Should not be able to create volume with non-existent source volume
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.create_volume,
size='1', source_volid=str(uuid.uuid4()),
display_name=v_name, metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('0186422c-999a-480e-a026-6a665744c30c')
def test_update_volume_with_nonexistent_volume_id(self):
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.update_volume,
volume_id=str(uuid.uuid4()), display_name=v_name,
metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
def test_update_volume_with_invalid_volume_id(self):
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.update_volume,
volume_id='#$%%&^&^', display_name=v_name,
metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
def test_update_volume_with_empty_volume_id(self):
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'work'}
self.assertRaises(lib_exc.NotFound, self.client.update_volume,
volume_id='', display_name=v_name,
metadata=metadata)
@test.attr(type=['negative'])
@test.idempotent_id('30799cfd-7ee4-446c-b66c-45b383ed211b')
def test_get_invalid_volume_id(self):
# Should not be able to get volume with invalid id
self.assertRaises(lib_exc.NotFound, self.client.show_volume,
'#$%%&^&^')
@test.attr(type=['negative'])
@test.idempotent_id('c6c3db06-29ad-4e91-beb0-2ab195fe49e3')
def test_get_volume_without_passing_volume_id(self):
# Should not be able to get volume when empty ID is passed
self.assertRaises(lib_exc.NotFound, self.client.show_volume, '')
@test.attr(type=['negative'])
@test.idempotent_id('1f035827-7c32-4019-9240-b4ec2dbd9dfd')
def test_delete_invalid_volume_id(self):
# Should not be able to delete volume when invalid ID is passed
self.assertRaises(lib_exc.NotFound, self.client.delete_volume,
'!@#$%^&*()')
@test.attr(type=['negative'])
@test.idempotent_id('441a1550-5d44-4b30-af0f-a6d402f52026')
def test_delete_volume_without_passing_volume_id(self):
# Should not be able to delete volume when empty ID is passed
self.assertRaises(lib_exc.NotFound, self.client.delete_volume, '')
@test.attr(type=['negative'])
@test.idempotent_id('f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6')
@test.services('compute')
def test_attach_volumes_with_nonexistent_volume_id(self):
srv_name = data_utils.rand_name('Instance')
server = self.create_server(
name=srv_name,
wait_until='ACTIVE')
self.addCleanup(waiters.wait_for_server_termination,
self.servers_client, server['id'])
self.addCleanup(self.servers_client.delete_server, server['id'])
self.assertRaises(lib_exc.NotFound,
self.client.attach_volume,
str(uuid.uuid4()),
instance_uuid=server['id'],
mountpoint=self.mountpoint)
@test.attr(type=['negative'])
@test.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
def test_detach_volumes_with_invalid_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.client.detach_volume,
'xxx')
@test.attr(type=['negative'])
@test.idempotent_id('e0c75c74-ee34-41a9-9288-2a2051452854')
def test_volume_extend_with_size_smaller_than_original_size(self):
# Extend volume with smaller size than original size.
extend_size = 0
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f')
def test_volume_extend_with_non_number_size(self):
# Extend volume when size is non number.
extend_size = 'abc'
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('355218f1-8991-400a-a6bb-971239287d92')
def test_volume_extend_with_None_size(self):
# Extend volume with None size.
extend_size = None
self.assertRaises(lib_exc.BadRequest, self.client.extend_volume,
self.volume['id'], new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
def test_volume_extend_with_nonexistent_volume_id(self):
# Extend volume size when volume is nonexistent.
extend_size = int(self.volume['size']) + 1
self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
str(uuid.uuid4()), new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
def test_volume_extend_without_passing_volume_id(self):
# Extend volume size when passing volume id is None.
extend_size = int(self.volume['size']) + 1
self.assertRaises(lib_exc.NotFound, self.client.extend_volume,
None, new_size=extend_size)
@test.attr(type=['negative'])
@test.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2')
def test_reserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.client.reserve_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('eb467654-3dc1-4a72-9b46-47c29d22654c')
def test_unreserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.client.unreserve_volume,
str(uuid.uuid4()))
@test.attr(type=['negative'])
@test.idempotent_id('449c4ed2-ecdd-47bb-98dc-072aeccf158c')
def test_reserve_volume_with_negative_volume_status(self):
# Mark volume as reserved.
self.client.reserve_volume(self.volume['id'])
# Mark volume which is marked as reserved before
self.assertRaises(lib_exc.BadRequest,
self.client.reserve_volume,
self.volume['id'])
# Unmark volume as reserved.
self.client.unreserve_volume(self.volume['id'])
@test.attr(type=['negative'])
@test.idempotent_id('0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f')
def test_list_volumes_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume')
params = {self.name_field: v_name}
fetched_volume = self.client.list_volumes(params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative'])
@test.idempotent_id('9ca17820-a0e7-4cbd-a7fa-f4468735e359')
def test_list_volumes_detail_with_nonexistent_name(self):
v_name = data_utils.rand_name('Volume')
params = {self.name_field: v_name}
fetched_volume = \
self.client.list_volumes(detail=True, params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative'])
@test.idempotent_id('143b279b-7522-466b-81be-34a87d564a7c')
def test_list_volumes_with_invalid_status(self):
params = {'status': 'null'}
fetched_volume = self.client.list_volumes(params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
@test.attr(type=['negative'])
@test.idempotent_id('ba94b27b-be3f-496c-a00e-0283b373fa75')
def test_list_volumes_detail_with_invalid_status(self):
params = {'status': 'null'}
fetched_volume = \
self.client.list_volumes(detail=True, params=params)['volumes']
self.assertEqual(0, len(fetched_volume))
class VolumesV1NegativeTest(VolumesV2NegativeTest):
_api_version = 1
_name = 'display_name'
| apache-2.0 |
codecakes/random_games | single_linkedlist.py | 1 | 8155 | ## Singly Linked List
## Each node acts as the relative head of the chain of nodes that follows it.
## The Linked List can be broken down into:
## Nodes.
"""
A Singly Linked List can:
- Insert at the head, the tail, or in between
- Delete at the head, the tail, or in between
- Append another linked list to an existing one
"""
class LinkedNode(object):
"""
Nodes have the following attributes:
1. Node Value
2. Next Node
3. Head Node?
4. Last Node?
"""
def __init__(self):
self.val = None
self.nxt = None
# length of the chain so far, up to and including this node
# (1 for an orphan node not yet linked into a chain)
self.len = 1
#set the following if part of a linked list chain
self.head = 0
self.last = 0
# pointer to the head node of the chain this node belongs to
self.headptr = None
def setVal(self, val):
self.val = val
def getVal(self): return self.val
def getLen(self): return self.len
def setNxt(self, other):
self.nxt = other #O(1)
def setHead(self):
"""If there is a successive node, set this as Head node"""
if self.hasNxt() and self.head==0:
self.head = 1
self.headptr = self
def setLast(self):
"""If this is the last node, set this as Last node"""
if not self.hasNxt() and self.last==0:
self.last = 1
def insertHead(self, newNode):
"""Insert newNode as Head node"""
if self.isHead():
node = self
else:
node = self.headptr
if node:
newNode.len = 1
newNode.setNxt(node) #O(1)
newNode.setHead()
def insertLast(self, newNode):
"""insert newNode as Last node"""
newNode.setLast()
node = self
#O(k<=n)
while not node.isLast():
node = node.getNxt()
node.last = 0
node.setNxt(newNode)
newNode.len = node.len + 1
newNode.headptr = self.headptr
def getNxt(self): return self.nxt
def hasNxt(self): return self.getNxt() != None
def disconnectNxt(self):
if self.hasNxt():
self.nxt = None
self.head = 0
def isHead(self): return self.head == 1
def isLast(self): return self.last == 1
class SingleLinkedList(object):
def __init__(self, link_node):
self.head_node = link_node
self.last_node = None
self.master_ln = 0
self.updateHead(self.head_node)
def add_node(self, val):
assert self.head_node == self.last_node.headptr
newNode = LinkedNode()
newNode.setVal(val)
self.last_node.setNxt(newNode)
self.last_node = newNode
newNode.len = self.master_ln + 1
self.master_ln = newNode.len
newNode.headptr = self.head_node
def deleteNode(self, val):
prev = node = self.head_node
node_val = node.val
while node_val != val and node.hasNxt():
prev = node
node = node.getNxt()
node_val = node.val
if node_val == val: break
if node_val == val:
if node.isLast():
#if its last node
prev.disconnectNxt()
head = prev.headptr
elif node.isHead():
#if its head node
nxt = node.getNxt()
node.disconnectNxt()
nxt.setHead()
head = nxt
elif node.hasNxt():
#if its somewhere between
nxt = node.getNxt()
node.disconnectNxt()
nxt.len = prev.len + 1
prev.setNxt(nxt)
head = prev.headptr
self.updateHead(head)
def updateHead(self, headptr):
"""
Set each node's headptr to the head node of the chain,
and renumber each node's len incrementally along the chain.
"""
node = headptr
self.head_node = node.headptr = headptr
node.head = 1
node.len = 1
ln = node.len
#till the end of chain
while node.hasNxt():
#get next node
node = node.getNxt()
#Set each node's headptr to head node of Chain
node.headptr = headptr
node.head = 0
#Set incremental length as node increases
node.len = ln + 1
ln = node.len
node.setLast()
self.last_node = node
self.master_ln = ln
assert node.headptr.len == 1
def updateList(self, otherlist):
"""Merge another linked list from end to current linked list"""
other = otherlist.head_node
if other.isHead(): other.head = 0
#Ripple headptr and inc length across nodes
self.last_node.setNxt(other)
self.updateHead(self.head_node)
def insertPos(self, val, pos):
"""Insert newNode as position pos if legit.
Current Pos is always 1 and relatively current node is the start node.
But each node gives absolute chain/linked-list length.
"""
if pos < 0 or pos > self.master_ln: return
newNode = LinkedNode()
newNode.setVal(val)
if pos == self.master_ln:
self.last_node.insertLast(newNode)
self.master_ln += 1
#newNode.headptr = self.head_node
self.last_node = newNode
return
elif pos == self.head_node.len:
self.head_node.insertHead(newNode)
self.head_node = newNode
self.updateHead(self.head_node)
return
node = self.head_node
while node.len < pos-1:
node = node.getNxt()
if node.len == pos-1: break
assert node.len == pos-1
nxt_node = node.getNxt()
node.setNxt(newNode) #newNode has nxt_node's postion
newNode.setNxt(nxt_node) #nxt_node's position is incremented by 1
self.updateHead(self.head_node)
return
#just for testing
if __name__ == "__main__":
a,b,c,d,n = [LinkedNode() for _ in xrange(5)]
n.setVal(1)
map(lambda x: x[0].setVal(x[1]), ((a,2), (b,3), (c,4), (d,5)))
n.setNxt(a)
a.setNxt(b)
b.setNxt(c)
L = SingleLinkedList(n)
node = L.head_node
print "="*10
while node.hasNxt():
print node.val
print node.headptr.val
print node.len
print
node = node.getNxt()
print node.val
print node.headptr.val
print node.len
L.insertPos(40, 2)
#L.insertPos(1, 30)
node = L.head_node
print "="*10
while node.hasNxt():
print node.val
print node.headptr.val
print node.len
print
node = node.getNxt()
print node.val
print node.headptr.val
print node.len
L.deleteNode(40)
L.deleteNode(3)
L.deleteNode(1)
L.deleteNode(2)
print "="*10
node = L.head_node
while node.hasNxt():
print node.val
print node.headptr.val
print node.len
print
node = node.getNxt()
print node.val
print node.headptr.val
print node.len
L.add_node(40)
L.insertPos(20, 1)
print "="*10
node = L.head_node
while node.hasNxt():
print node.val
print node.headptr.val
print node.len
print
node = node.getNxt()
print node.val
print node.headptr.val
print node.len
f,g,h = [LinkedNode() for _ in xrange(3)]
map(lambda x: x[0].setVal(x[1]), ((f,300), (g,45), (h, 56)))
f.setNxt(g)
g.setNxt(h)
R = SingleLinkedList(f)
L.updateList(R)
print "="*10
node = L.head_node
while node.hasNxt():
print node.val
print node.headptr.val
print node.len
print
node = node.getNxt()
print node.val
print node.headptr.val
print node.len
| mit |
googlefonts/fontbakery | Lib/fontbakery/reporters/__init__.py | 1 | 4250 | """
Separation of Concerns Disclaimer:
While created specifically for checking fonts and font-families, this
module has no domain knowledge about fonts. It can be used for any kind
of (document) checking. Please keep it so. It will be valuable for other
domains as well.
Domain specific knowledge should be encoded only in the Profile (Checks,
Conditions) and MAYBE in *customized* reporters e.g. subclasses.
"""
from collections import Counter
from fontbakery.checkrunner import (
DEBUG
, INFO
, WARN
, ERROR
, STARTCHECK
, SKIP
, PASS
, FAIL
, ENDCHECK
, SECTIONSUMMARY
, START
, END
)
from fontbakery.errors import ProtocolViolationError
class FontbakeryReporter:
def __init__(self, is_async=False, runner=None, output_file=None, loglevels=None):
self._started = None
self._ended = None
self._order = None
self._results = [] # ENDCHECK events in order of appearance
self._indexes = {}
self._tick = 0
self._counter = Counter()
self.loglevels = loglevels
# Runner should know if it is async!
self.is_async = is_async
self.runner = runner
self._worst_check_status = None
self.output_file = output_file
def run(self, order=None):
"""
self.runner must be present
"""
for event in self.runner.run(order=order):
self.receive(event)
@property
def order(self):
return self._order
def write(self):
if self.output_file is not None:
raise NotImplementedError(
f'{type(self)} does not implement the "write" method, '
'but it has an "output_file".'
)
# reporters without an output file do nothing here
def _get_key(self, identity):
section, check, iterargs = identity
return (str(section) if section else section
, str(check) if check else check
, iterargs
)
def _get_index(self, identity):
key = self._get_key(identity)
try:
return self._indexes[key]
except KeyError:
self._indexes[key] = len(self._indexes)
return self._indexes[key]
def _set_order(self, order):
self._order = tuple(order)
length = len(self._order)
self._counter['(not finished)'] = length - len(self._results)
self._indexes = dict(zip(map(self._get_key, self._order), range(length)))
def _cleanup(self, event):
pass
def _output(self, event):
pass
def _register(self, event):
status, message, identity = event
self._tick += 1
if status == START:
self._set_order(message)
self._started = event
if status == END:
self._ended = event
if status == ENDCHECK:
self._results.append(event)
self._counter[message.name] += 1
self._counter['(not finished)'] -= 1
@property
def worst_check_status(self):
""" Returns a status or None if there was no check result """
return self._worst_check_status
def receive(self, event):
status, message, identity = event
if self._started is None and status != START:
raise ProtocolViolationError(f'Received Event before status START:'
f' {status} {message}.')
if self._ended:
status, message, identity = event
raise ProtocolViolationError(f'Received Event after status END:'
f' {status} {message}.')
if status is ENDCHECK and (self._worst_check_status is None \
or self._worst_check_status < message):
# we only record ENDCHECK, because check runner may in the future
# have tools to upgrade/downgrade the actually worst status
# this should be future proof.
self._worst_check_status = message
self._register(event)
self._cleanup(event)
self._output(event)
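# A minimal sketch of the intended extension point: subclasses customise
# behaviour by overriding the private hooks.  The class below is only an
# illustration (its name and output format are assumptions, not one of
# fontbakery's shipped reporters).
class _PrintingReporter(FontbakeryReporter):
    def _output(self, event):
        # _register() has already updated self._counter by the time the
        # base class calls _output(), so the running totals are current.
        status, message, identity = event
        if status == ENDCHECK:
            print(f'{identity[1]} -> {message.name}')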
| apache-2.0 |
JeyZeta/Dangerous | Dangerous/Golismero/thirdparty_libs/dns/zone.py | 26 | 37794 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
from __future__ import generators
import sys
import re
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rrset
import dns.tokenizer
import dns.ttl
import dns.grange
class BadZone(dns.exception.DNSException):
"""The zone is malformed."""
pass
class NoSOA(BadZone):
"""The zone has no SOA RR at its origin."""
pass
class NoNS(BadZone):
"""The zone has no NS RRset at its origin."""
pass
class UnknownOrigin(BadZone):
"""The zone's origin is unknown."""
pass
class Zone(object):
"""A DNS zone.
A Zone is a mapping from names to nodes. The zone object may be
treated like a Python dictionary, e.g. zone[name] will retrieve
the node associated with that name. The I{name} may be a
dns.name.Name object, or it may be a string. In either case,
if the name is relative it is treated as relative to the origin of
the zone.
@ivar rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@ivar origin: The origin of the zone.
@type origin: dns.name.Name object
@ivar nodes: A dictionary mapping the names of nodes in the zone to the
nodes themselves.
@type nodes: dict
@ivar relativize: should names in the zone be relativized?
@type relativize: bool
@cvar node_factory: the factory used to create a new node
@type node_factory: class or callable
"""
node_factory = dns.node.Node
__slots__ = ['rdclass', 'origin', 'nodes', 'relativize']
def __init__(self, origin, rdclass=dns.rdataclass.IN, relativize=True):
"""Initialize a zone object.
@param origin: The origin of the zone.
@type origin: dns.name.Name object
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int"""
self.rdclass = rdclass
self.origin = origin
self.nodes = {}
self.relativize = relativize
def __eq__(self, other):
"""Two zones are equal if they have the same origin, class, and
nodes.
@rtype: bool
"""
if not isinstance(other, Zone):
return False
if self.rdclass != other.rdclass or \
self.origin != other.origin or \
self.nodes != other.nodes:
return False
return True
def __ne__(self, other):
"""Are two zones not equal?
@rtype: bool
"""
return not self.__eq__(other)
def _validate_name(self, name):
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, None)
elif not isinstance(name, dns.name.Name):
raise KeyError("name parameter must be convertable to a DNS name")
if name.is_absolute():
if not name.is_subdomain(self.origin):
raise KeyError("name parameter must be a subdomain of the zone origin")
if self.relativize:
name = name.relativize(self.origin)
return name
def __getitem__(self, key):
key = self._validate_name(key)
return self.nodes[key]
def __setitem__(self, key, value):
key = self._validate_name(key)
self.nodes[key] = value
def __delitem__(self, key):
key = self._validate_name(key)
del self.nodes[key]
def __iter__(self):
return self.nodes.iterkeys()
def iterkeys(self):
return self.nodes.iterkeys()
def keys(self):
return self.nodes.keys()
def itervalues(self):
return self.nodes.itervalues()
def values(self):
return self.nodes.values()
def iteritems(self):
return self.nodes.iteritems()
def items(self):
return self.nodes.items()
def get(self, key):
key = self._validate_name(key)
return self.nodes.get(key)
def __contains__(self, other):
return other in self.nodes
def find_node(self, name, create=False):
"""Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object
"""
name = self._validate_name(name)
node = self.nodes.get(name)
if node is None:
if not create:
raise KeyError
node = self.node_factory()
self.nodes[name] = node
return node
def get_node(self, name, create=False):
"""Get a node in the zone, possibly creating it.
This method is like L{find_node}, except it returns None instead
of raising an exception if the node does not exist and creation
has not been requested.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@rtype: dns.node.Node object or None
"""
try:
node = self.find_node(name, create)
except KeyError:
node = None
return node
def delete_node(self, name):
"""Delete the specified node if it exists.
It is not an error if the node does not exist.
"""
name = self._validate_name(name)
if self.nodes.has_key(name):
del self.nodes[name]
def find_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for rdata with the specified name and type in the zone,
and return an rdataset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
The rdataset returned is not a copy; changes to it will change
the zone.
KeyError is raised if the name or type are not found.
Use L{get_rdataset} if you want to have None returned instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@param create: should the node and rdataset be created if they do not
exist?
@type create: bool
@raises KeyError: the node or rdata could not be found
@rtype: dns.rrset.RRset object
"""
name = self._validate_name(name)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, (str, unicode)):
covers = dns.rdatatype.from_text(covers)
node = self.find_node(name, create)
return node.find_rdataset(self.rdclass, rdtype, covers, create)
def get_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE,
create=False):
"""Look for rdata with the specified name and type in the zone,
and return an rdataset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
The rdataset returned is not a copy; changes to it will change
the zone.
None is returned if the name or type are not found.
Use L{find_rdataset} if you want to have KeyError raised instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@param create: should the node and rdataset be created if they do not
exist?
@type create: bool
@rtype: dns.rrset.RRset object
"""
try:
rdataset = self.find_rdataset(name, rdtype, covers, create)
except KeyError:
rdataset = None
return rdataset
def delete_rdataset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Delete the rdataset matching I{rdtype} and I{covers}, if it
exists at the node specified by I{name}.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
It is not an error if the node does not exist, or if there is no
matching rdataset at the node.
If the node has no rdatasets after the deletion, it will itself
be deleted.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
name = self._validate_name(name)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, (str, unicode)):
covers = dns.rdatatype.from_text(covers)
node = self.get_node(name)
if not node is None:
node.delete_rdataset(self.rdclass, rdtype, covers)
if len(node) == 0:
self.delete_node(name)
def replace_rdataset(self, name, replacement):
"""Replace an rdataset at name.
It is not an error if there is no rdataset matching I{replacement}.
Ownership of the I{replacement} object is transferred to the zone;
in other words, this method does not store a copy of I{replacement}
at the node, it stores I{replacement} itself.
If the I{name} node does not exist, it is created.
@param name: the owner name
@type name: DNS.name.Name object or string
@param replacement: the replacement rdataset
@type replacement: dns.rdataset.Rdataset
"""
if replacement.rdclass != self.rdclass:
raise ValueError('replacement.rdclass != zone.rdclass')
node = self.find_node(name, True)
node.replace_rdataset(replacement)
def find_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for rdata with the specified name and type in the zone,
and return an RRset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
This method is less efficient than the similar
L{find_rdataset} because it creates an RRset instead of
returning the matching rdataset. It may be more convenient
for some uses since it returns an object which binds the owner
name to the rdata.
This method may not be used to create new nodes or rdatasets;
use L{find_rdataset} instead.
KeyError is raised if the name or type are not found.
Use L{get_rrset} if you want to have None returned instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@raises KeyError: the node or rdata could not be found
@rtype: dns.rrset.RRset object
"""
name = self._validate_name(name)
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, (str, unicode)):
covers = dns.rdatatype.from_text(covers)
rdataset = self.nodes[name].find_rdataset(self.rdclass, rdtype, covers)
rrset = dns.rrset.RRset(name, self.rdclass, rdtype, covers)
rrset.update(rdataset)
return rrset
def get_rrset(self, name, rdtype, covers=dns.rdatatype.NONE):
"""Look for rdata with the specified name and type in the zone,
and return an RRset encapsulating it.
The I{name}, I{rdtype}, and I{covers} parameters may be
strings, in which case they will be converted to their proper
type.
This method is less efficient than the similar L{get_rdataset}
because it creates an RRset instead of returning the matching
rdataset. It may be more convenient for some uses since it
returns an object which binds the owner name to the rdata.
This method may not be used to create new nodes or rdatasets;
use L{find_rdataset} instead.
None is returned if the name or type are not found.
Use L{find_rrset} if you want to have KeyError raised instead.
@param name: the owner name to look for
@type name: DNS.name.Name object or string
@param rdtype: the rdata type desired
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
@rtype: dns.rrset.RRset object
"""
try:
rrset = self.find_rrset(name, rdtype, covers)
except KeyError:
rrset = None
return rrset
def iterate_rdatasets(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, rdataset) tuples for
all rdatasets in the zone which have the specified I{rdtype}
and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
then all rdatasets will be matched.
@param rdtype: int or string
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, (str, unicode)):
covers = dns.rdatatype.from_text(covers)
for (name, node) in self.iteritems():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
yield (name, rds)
def iterate_rdatas(self, rdtype=dns.rdatatype.ANY,
covers=dns.rdatatype.NONE):
"""Return a generator which yields (name, ttl, rdata) tuples for
all rdatas in the zone which have the specified I{rdtype}
and I{covers}. If I{rdtype} is dns.rdatatype.ANY, the default,
then all rdatas will be matched.
@param rdtype: int or string
@type rdtype: int or string
@param covers: the covered type (defaults to None)
@type covers: int or string
"""
if isinstance(rdtype, (str, unicode)):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(covers, (str, unicode)):
covers = dns.rdatatype.from_text(covers)
for (name, node) in self.iteritems():
for rds in node:
if rdtype == dns.rdatatype.ANY or \
(rds.rdtype == rdtype and rds.covers == covers):
for rdata in rds:
yield (name, rds.ttl, rdata)
def to_file(self, f, sorted=True, relativize=True, nl=None):
"""Write a zone to a file.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@param sorted: if True, the file will be written with the
names sorted in DNSSEC order from least to greatest. Otherwise
the names will be written in whatever order they happen to have
in the zone's dictionary.
@param relativize: if True, domain names in the output will be
relativized to the zone's origin (if possible).
@type relativize: bool
@param nl: The end of line string. If not specified, the
output will use the platform's native end-of-line marker (i.e.
LF on POSIX, CRLF on Windows, CR on Macintosh).
@type nl: string or None
"""
if sys.hexversion >= 0x02030000:
# allow Unicode filenames
str_type = basestring
else:
str_type = str
if nl is None:
opts = 'w'
else:
opts = 'wb'
if isinstance(f, str_type):
f = file(f, opts)
want_close = True
else:
want_close = False
try:
if sorted:
names = self.keys()
names.sort()
else:
names = self.iterkeys()
for n in names:
l = self[n].to_text(n, origin=self.origin,
relativize=relativize)
if nl is None:
print >> f, l
else:
f.write(l)
f.write(nl)
finally:
if want_close:
f.close()
def check_origin(self):
"""Do some simple checking of the zone's origin.
@raises dns.zone.NoSOA: there is no SOA RR
@raises dns.zone.NoNS: there is no NS RRset
@raises KeyError: there is no origin node
"""
if self.relativize:
name = dns.name.empty
else:
name = self.origin
if self.get_rdataset(name, dns.rdatatype.SOA) is None:
raise NoSOA
if self.get_rdataset(name, dns.rdatatype.NS) is None:
raise NoNS
class _MasterReader(object):
"""Read a DNS master file
@ivar tok: The tokenizer
@type tok: dns.tokenizer.Tokenizer object
@ivar ttl: The default TTL
@type ttl: int
@ivar last_name: The last name read
@type last_name: dns.name.Name object
@ivar current_origin: The current origin
@type current_origin: dns.name.Name object
@ivar relativize: should names in the zone be relativized?
@type relativize: bool
@ivar zone: the zone
@type zone: dns.zone.Zone object
@ivar saved_state: saved reader state (used when processing $INCLUDE)
@type saved_state: list of (tokenizer, current_origin, last_name, file)
tuples.
@ivar current_file: the file object of the $INCLUDed file being parsed
(None if no $INCLUDE is active).
@ivar allow_include: is $INCLUDE allowed?
@type allow_include: bool
@ivar check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
"""
def __init__(self, tok, origin, rdclass, relativize, zone_factory=Zone,
allow_include=False, check_origin=True):
if isinstance(origin, (str, unicode)):
origin = dns.name.from_text(origin)
self.tok = tok
self.current_origin = origin
self.relativize = relativize
self.ttl = 0
self.last_name = None
self.zone = zone_factory(origin, rdclass, relativize=relativize)
self.saved_state = []
self.current_file = None
self.allow_include = allow_include
self.check_origin = check_origin
def _eat_line(self):
while 1:
token = self.tok.get()
if token.is_eol_or_eof():
break
def _rr_line(self):
"""Process one line from a DNS master file."""
# Name
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get(want_leading = True)
if not token.is_whitespace():
self.last_name = dns.name.from_text(token.value, self.current_origin)
else:
token = self.tok.get()
if token.is_eol_or_eof():
# treat leading WS followed by EOL/EOF as if they were EOL/EOF.
return
self.tok.unget(token)
name = self.last_name
if not name.is_subdomain(self.zone.origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone.origin)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
ttl = self.ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except:
rdclass = self.zone.rdclass
if rdclass != self.zone.rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
except:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" % token.value)
n = self.zone.nodes.get(name)
if n is None:
n = self.zone.node_factory()
self.zone.nodes[name] = n
try:
rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
self.current_origin, False)
except dns.exception.SyntaxError:
# Catch and reraise.
(ty, va) = sys.exc_info()[:2]
raise va
except:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" % (str(ty), str(va)))
rd.choose_relativity(self.zone.origin, self.relativize)
covers = rd.covers()
rds = n.find_rdataset(rdclass, rdtype, covers, True)
rds.add(rd, ttl)
def _parse_modify(self, side):
# Here we catch everything in '{' '}' in a group so we can replace it
# with ''.
is_generate1 = re.compile("^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
is_generate2 = re.compile("^.*\$({(\+|-?)(\d+)}).*$")
is_generate3 = re.compile("^.*\$({(\+|-?)(\d+),(\d+)}).*$")
# Sometimes there are modifiers in the hostname. These come after
# the dollar sign. They are in the form: ${offset[,width[,base]]}.
# Make names
g1 = is_generate1.match(side)
if g1:
mod, sign, offset, width, base = g1.groups()
if sign == '':
sign = '+'
g2 = is_generate2.match(side)
if g2:
mod, sign, offset = g2.groups()
if sign == '':
sign = '+'
width = 0
base = 'd'
g3 = is_generate3.match(side)
if g3:
mod, sign, offset, width = g3.groups()
if sign == '':
sign = '+'
base = 'd'
if not (g1 or g2 or g3):
mod = ''
sign = '+'
offset = 0
width = 0
base = 'd'
if base != 'd':
raise NotImplementedError
return mod, sign, offset, width, base
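# Illustration of the modifier syntax handled above (the record values are
# assumptions, not taken from the original file): the master file line
#
#     $GENERATE 1-4 dhcp-${0,3,d} A 10.0.0.$
#
# uses offset 0, width 3 and base 'd' on the left-hand side, so the generated
# names are dhcp-001 .. dhcp-004 while the right-hand side expands to
# 10.0.0.1 .. 10.0.0.4.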
def _generate_line(self):
# range lhs [ttl] [class] type rhs [ comment ]
"""Process one line containing the GENERATE statement from a DNS
master file."""
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get()
# Range (required)
try:
start, stop, step = dns.grange.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except:
raise dns.exception.SyntaxError
# lhs (required)
try:
lhs = token.value
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except:
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
ttl = self.ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except:
rdclass = self.zone.rdclass
if rdclass != self.zone.rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
token.value)
# rhs (required)
try:
rhs = token.value
except:
raise dns.exception.SyntaxError
lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
for i in range(start, stop + 1, step):
# +1 because bind is inclusive and python is exclusive
if lsign == '+':
lindex = i + int(loffset)
elif lsign == '-':
lindex = i - int(loffset)
if rsign == '-':
rindex = i - int(roffset)
elif rsign == '+':
rindex = i + int(roffset)
lzfindex = str(lindex).zfill(int(lwidth))
rzfindex = str(rindex).zfill(int(rwidth))
name = lhs.replace('$%s' % (lmod), lzfindex)
rdata = rhs.replace('$%s' % (rmod), rzfindex)
self.last_name = dns.name.from_text(name, self.current_origin)
name = self.last_name
if not name.is_subdomain(self.zone.origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone.origin)
n = self.zone.nodes.get(name)
if n is None:
n = self.zone.node_factory()
self.zone.nodes[name] = n
try:
rd = dns.rdata.from_text(rdclass, rdtype, rdata,
self.current_origin, False)
except dns.exception.SyntaxError:
# Catch and reraise.
(ty, va) = sys.exc_info()[:2]
raise va
except:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" %
(str(ty), str(va)))
rd.choose_relativity(self.zone.origin, self.relativize)
covers = rd.covers()
rds = n.find_rdataset(rdclass, rdtype, covers, True)
rds.add(rd, ttl)
def read(self):
"""Read a DNS master file and build a zone object.
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
"""
try:
while 1:
token = self.tok.get(True, True).unescape()
if token.is_eof():
if not self.current_file is None:
self.current_file.close()
if len(self.saved_state) > 0:
(self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.ttl) = self.saved_state.pop(-1)
continue
break
elif token.is_eol():
continue
elif token.is_comment():
self.tok.get_eol()
continue
elif token.value[0] == '$':
u = token.value.upper()
if u == '$TTL':
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError("bad $TTL")
self.ttl = dns.ttl.from_text(token.value)
self.tok.get_eol()
elif u == '$ORIGIN':
self.current_origin = self.tok.get_name()
self.tok.get_eol()
if self.zone.origin is None:
self.zone.origin = self.current_origin
elif u == '$INCLUDE' and self.allow_include:
token = self.tok.get()
filename = token.value
token = self.tok.get()
if token.is_identifier():
new_origin = dns.name.from_text(token.value, \
self.current_origin)
self.tok.get_eol()
elif not token.is_eol_or_eof():
raise dns.exception.SyntaxError("bad origin in $INCLUDE")
else:
new_origin = self.current_origin
self.saved_state.append((self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.ttl))
self.current_file = file(filename, 'r')
self.tok = dns.tokenizer.Tokenizer(self.current_file,
filename)
self.current_origin = new_origin
elif u == '$GENERATE':
self._generate_line()
else:
raise dns.exception.SyntaxError("Unknown master file directive '" + u + "'")
continue
self.tok.unget(token)
self._rr_line()
except dns.exception.SyntaxError, detail:
(filename, line_number) = self.tok.where()
if detail is None:
detail = "syntax error"
raise dns.exception.SyntaxError("%s:%d: %s" % (filename, line_number, detail))
# Now that we're done reading, do some basic checking of the zone.
if self.check_origin:
self.zone.check_origin()
def from_text(text, origin = None, rdclass = dns.rdataclass.IN,
relativize = True, zone_factory=Zone, filename=None,
allow_include=False, check_origin=True):
"""Build a zone object from a master file format string.
@param text: the master file format input
@type text: string.
@param origin: The origin of the zone; if not specified, the first
$ORIGIN statement in the master file will determine the origin of the
zone.
@type origin: dns.name.Name object or string
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@param relativize: should names be relativized? The default is True
@type relativize: bool
@param zone_factory: The zone factory to use
@type zone_factory: function returning a Zone
@param filename: The filename to emit when describing where an error
occurred; the default is '<string>'.
@type filename: string
@param allow_include: is $INCLUDE allowed?
@type allow_include: bool
@param check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
# 'text' can also be a file, but we don't publish that fact
# since it's an implementation detail. The official file
# interface is from_file().
if filename is None:
filename = '<string>'
tok = dns.tokenizer.Tokenizer(text, filename)
reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
allow_include=allow_include,
check_origin=check_origin)
reader.read()
return reader.zone
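# Illustrative usage sketch (the zone data and origin are assumptions, not part
# of the original module):
#
#     text = '@ 3600 IN SOA ns1 hostmaster 1 7200 900 1209600 86400\n' \
#            '@ 3600 IN NS ns1\n' \
#            'ns1 3600 IN A 10.0.0.1\n'
#     zone = dns.zone.from_text(text, origin='example.')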
def from_file(f, origin = None, rdclass = dns.rdataclass.IN,
relativize = True, zone_factory=Zone, filename=None,
allow_include=True, check_origin=True):
"""Read a master file and build a zone object.
@param f: file or string. If I{f} is a string, it is treated
as the name of a file to open.
@param origin: The origin of the zone; if not specified, the first
$ORIGIN statement in the master file will determine the origin of the
zone.
@type origin: dns.name.Name object or string
@param rdclass: The zone's rdata class; the default is class IN.
@type rdclass: int
@param relativize: should names be relativized? The default is True
@type relativize: bool
@param zone_factory: The zone factory to use
@type zone_factory: function returning a Zone
@param filename: The filename to emit when describing where an error
occurred; the default is '<file>', or the value of I{f} if I{f} is a
string.
@type filename: string
@param allow_include: is $INCLUDE allowed?
@type allow_include: bool
@param check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
if sys.hexversion >= 0x02030000:
# allow Unicode filenames; turn on universal newline support
str_type = basestring
opts = 'rU'
else:
str_type = str
opts = 'r'
if isinstance(f, str_type):
if filename is None:
filename = f
f = file(f, opts)
want_close = True
else:
if filename is None:
filename = '<file>'
want_close = False
try:
z = from_text(f, origin, rdclass, relativize, zone_factory,
filename, allow_include, check_origin)
finally:
if want_close:
f.close()
return z
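# Illustrative call (the path and origin are assumptions, not part of the
# original module):
#
#     zone = dns.zone.from_file('/etc/namedb/example.com.zone',
#                               origin='example.com.')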
def from_xfr(xfr, zone_factory=Zone, relativize=True, check_origin=True):
"""Convert the output of a zone transfer generator into a zone object.
@param xfr: The xfr generator
@type xfr: generator of dns.message.Message objects
@param relativize: should names be relativized? The default is True.
It is essential that the relativize setting matches the one specified
to dns.query.xfr().
@type relativize: bool
@param check_origin: should sanity checks of the origin node be done?
The default is True.
@type check_origin: bool
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
@rtype: dns.zone.Zone object
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
rd.choose_relativity(z.origin, relativize)
zrds.add(rd)
if check_origin:
z.check_origin()
return z
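# Illustrative usage (the server address and zone name are assumptions); the
# relativize argument passed to dns.query.xfr() must match the one used here:
#
#     xfr = dns.query.xfr('10.0.0.1', 'example.com.', relativize=True)
#     zone = dns.zone.from_xfr(xfr, relativize=True)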
| mit |
harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/ec2/bundleinstance.py | 18 | 2754 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Bundle Task
"""
from boto.ec2.ec2object import EC2Object
class BundleInstanceTask(EC2Object):
def __init__(self, connection=None):
super(BundleInstanceTask, self).__init__(connection)
self.id = None
self.instance_id = None
self.progress = None
self.start_time = None
self.state = None
self.bucket = None
self.prefix = None
self.upload_policy = None
self.upload_policy_signature = None
self.update_time = None
self.code = None
self.message = None
def __repr__(self):
return 'BundleInstanceTask:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'bundleId':
self.id = value
elif name == 'instanceId':
self.instance_id = value
elif name == 'progress':
self.progress = value
elif name == 'startTime':
self.start_time = value
elif name == 'state':
self.state = value
elif name == 'bucket':
self.bucket = value
elif name == 'prefix':
self.prefix = value
elif name == 'uploadPolicy':
self.upload_policy = value
elif name == 'uploadPolicySignature':
self.upload_policy_signature = value
elif name == 'updateTime':
self.update_time = value
elif name == 'code':
self.code = value
elif name == 'message':
self.message = value
else:
setattr(self, name, value)
| gpl-3.0 |
NeovaHealth/odoo | addons/l10n_ve/__openerp__.py | 260 | 2960 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
##############################################################################
# Module programed and financed by:
# Vauxoo, C.A. (<http://vauxoo.com>).
# Our Community team maintains this module:
# https://launchpad.net/~openerp-venezuela
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Venezuela - Accounting',
'version': '1.0',
'author': ['OpenERP SA', 'Vauxoo'],
'category': 'Localization/Account Charts',
'description':
"""
Chart of Account for Venezuela.
===============================
Venezuela doesn't have any chart of accounts mandated by law, but the default
proposed in OpenERP should comply with some accepted best practices in Venezuela;
this plan complies with these practices.
This module has been tested as a base by more than 1000 companies, because
it is based on a mixture of the most common software in the Venezuelan
market, which should make accountants feel comfortable with their first
steps in OpenERP.
This module doesn't pretend to be the complete localization for Venezuela,
but it will help you to start really quickly with OpenERP in this country.
This module gives you.
---------------------
- Basic taxes for Venezuela.
- Have basic data to run tests with community localization.
- Start a company from 0 if your needs are basic from an accounting PoV.
We recommend installing account_anglo_saxon if you want to value your
stocks as Venezuela does with outgoing invoices.
If you install this module and select the Custom chart, a basic chart will be
proposed, but you will need to set the default accounts for taxes manually.
""",
'depends': ['account',
'base_vat',
'account_chart'
],
'demo': [],
'data': ['data/account_tax_code.xml',
'data/account_user_types.xml',
'data/account_chart.xml',
'data/account_tax.xml',
'data/l10n_chart_ve_wizard.xml'
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shreddd/newt-2.0 | authnz/adapters/template_adapter.py | 3 | 2418 | """Auth Adapter Template File
IMPORTANT: NOT A FUNCTIONAL ADAPTER. FUNCTIONS MUST BE IMPLEMENTED
Notes:
- Each of the functions defined below must return a json serializable
object, json_response, or valid HttpResponse object
- A json_response creates an HttpResponse object given parameters:
- content: string with the contents of the response
- status: string with the status of the response
- status_code: HTTP status code
- error: string with the error message if there is one
"""
from common.response import json_response
import logging
import re
logger = logging.getLogger("newt." + __name__)
def get_status(request):
"""Returns the current user status
Keyword arguments:
request -- Django HttpRequest
"""
pass
def login(request):
"""Logs the user in and returns the status
Keyword arguments:
request -- Django HttpRequest
"""
pass
def logout(request):
"""Logs the user out and returns the status
Keyword arguments:
request -- Django HttpRequest
"""
pass
"""A tuple list in the form of:
(
(compiled_regex_exp, associated_function, request_required),
...
)
Note: The compiled_regex_exp must have named groups corresponding to
the arguments of the associated_function
Note: if request_required is True, the associated_function must have
request as the first argument
Example:
patterns = (
(re.compile(r'/usage/(?P<path>.+)$'), get_usage, False),
(re.compile(r'/image/(?P<query>.+)$'), get_image, False),
(re.compile(r'/(?P<path>.+)$'), get_resource, False),
)
"""
patterns = (
)
def extras_router(request, query):
"""Maps a query to a function if the pattern matches and returns result
Keyword arguments:
request -- Django HttpRequest
query -- the query to be matched against
"""
for pattern, func, req in patterns:
match = pattern.match(query)
if match and req:
return func(request, **match.groupdict())
elif match:
return func(**match.groupdict())
# Returns an Unimplemented response if no pattern matches
return json_response(status="Unimplemented",
status_code=501,
error="",
content="query: %s" % query) | bsd-2-clause |
miptliot/edx-platform | common/lib/capa/capa/tests/test_customrender.py | 37 | 2295 | from lxml import etree
import unittest
import xml.sax.saxutils as saxutils
from capa.tests.helpers import test_capa_system
from capa import customrender
# just a handy shortcut
lookup_tag = customrender.registry.get_class_for_tag
def extract_context(xml):
"""
Given an xml element corresponding to the output of test_capa_system.render_template, get back the
original context
"""
return eval(xml.text)
def quote_attr(s):
return saxutils.quoteattr(s)[1:-1] # don't want the outer quotes
class HelperTest(unittest.TestCase):
'''
Make sure that our helper function works!
'''
def check(self, d):
xml = etree.XML(test_capa_system().render_template('blah', d))
self.assertEqual(d, extract_context(xml))
def test_extract_context(self):
self.check({})
self.check({1: 2})
self.check({'id': 'an id'})
self.check({'with"quote': 'also"quote'})
class SolutionRenderTest(unittest.TestCase):
'''
Make sure solutions render properly.
'''
def test_rendering(self):
solution = 'To compute unicorns, count them.'
xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution)
element = etree.fromstring(xml_str)
renderer = lookup_tag('solution')(test_capa_system(), element)
self.assertEqual(renderer.id, 'solution_12')
# Our test_capa_system "renders" templates to a div with the repr of the context.
xml = renderer.get_html()
context = extract_context(xml)
self.assertEqual(context, {'id': 'solution_12'})
class MathRenderTest(unittest.TestCase):
'''
Make sure math renders properly.
'''
def check_parse(self, latex_in, mathjax_out):
xml_str = """<math>{tex}</math>""".format(tex=latex_in)
element = etree.fromstring(xml_str)
renderer = lookup_tag('math')(test_capa_system(), element)
self.assertEqual(renderer.mathstr, mathjax_out)
def test_parsing(self):
self.check_parse('$abc$', '[mathjaxinline]abc[/mathjaxinline]')
self.check_parse('$abc', '$abc')
self.check_parse(r'$\displaystyle 2+2$', '[mathjax] 2+2[/mathjax]')
# NOTE: not testing get_html yet because I don't understand why it's doing what it's doing.
| agpl-3.0 |
anryko/ansible | test/units/modules/network/fortios/test_fortios_user_security_exempt_list.py | 21 | 7891 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_security_exempt_list
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_security_exempt_list.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_security_exempt_list_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_security_exempt_list': {
'description': 'test_value_3',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance)
expected_data = {
'description': 'test_value_3',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_security_exempt_list_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_security_exempt_list': {
'description': 'test_value_3',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance)
expected_data = {
'description': 'test_value_3',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_security_exempt_list_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_security_exempt_list': {
'description': 'test_value_3',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'security-exempt-list', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_security_exempt_list_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_security_exempt_list': {
'description': 'test_value_3',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'security-exempt-list', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_security_exempt_list_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_security_exempt_list': {
'description': 'test_value_3',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance)
expected_data = {
'description': 'test_value_3',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_security_exempt_list_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_security_exempt_list': {
'random_attribute_not_valid': 'tag',
'description': 'test_value_3',
'name': 'default_name_4',
},
'vdom': 'root'}
is_error, changed, response = fortios_user_security_exempt_list.fortios_user(input_data, fos_instance)
expected_data = {
'description': 'test_value_3',
'name': 'default_name_4',
}
set_method_mock.assert_called_with('user', 'security-exempt-list', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
deandunbar/html2bwml | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py | 1182 | 3734 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| mit |
liuliwork/django | tests/model_options/models/default_related_name.py | 414 | 1056 | from django.db import models
class Author(models.Model):
first_name = models.CharField(max_length=128)
last_name = models.CharField(max_length=128)
class Editor(models.Model):
name = models.CharField(max_length=128)
bestselling_author = models.ForeignKey(Author, models.CASCADE)
class Book(models.Model):
title = models.CharField(max_length=128)
authors = models.ManyToManyField(Author)
editor = models.ForeignKey(Editor, models.CASCADE, related_name="edited_books")
class Meta:
default_related_name = "books"
class Store(models.Model):
name = models.CharField(max_length=128)
address = models.CharField(max_length=128)
class Meta:
abstract = True
default_related_name = "%(app_label)s_%(model_name)ss"
class BookStore(Store):
available_books = models.ManyToManyField(Book)
class EditorStore(Store):
editor = models.ForeignKey(Editor, models.CASCADE)
available_books = models.ManyToManyField(Book)
class Meta:
default_related_name = "editor_stores"
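# Illustrative reverse accessors implied by the Meta options above (the queries
# and the app label "model_options" are assumptions, not part of the original
# file):
#
#     author.books.all()                       # Book.Meta.default_related_name = "books"
#     book.model_options_bookstores.all()      # from the abstract Store pattern
#     editor.editor_stores.all()               # EditorStore overrides the pattern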
| bsd-3-clause |
rghe/ansible | lib/ansible/modules/network/panos/panos_security_rule.py | 16 | 19823 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, techbizdev <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_security_rule
short_description: Create security rule policy on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the traffic is applied,
the more specific rules must precede the more general ones.
author: "Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPi U(https://pypi.org/project/pandevice/)
- xmltodict can be obtained from PyPi U(https://pypi.org/project/xmltodict/)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
default: 'add'
rule_name:
description:
- Name of the security rule.
required: true
rule_type:
description:
- Type of security rule (version 6.1 of PanOS and above).
default: "universal"
description:
description:
- Description for the security rule.
tag_name:
description:
- Administrative tags that can be added to the rule. Note, tags must be already defined.
source_zone:
description:
- List of source zones.
default: "any"
destination_zone:
description:
- List of destination zones.
default: "any"
source_ip:
description:
- List of source addresses.
default: "any"
source_user:
description:
- Use users to enforce policy for individual users or a group of users.
default: "any"
hip_profiles:
description: >
- If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy
on information collected by GlobalProtect. For example, the user access level can be determined from the HIP that
notifies the firewall about the user's local configuration.
default: "any"
destination_ip:
description:
- List of destination addresses.
default: "any"
application:
description:
- List of applications.
default: "any"
service:
description:
- List of services.
default: "application-default"
log_start:
description:
- Whether to log at session start.
log_end:
description:
- Whether to log at session end.
default: true
action:
description:
- Action to apply once the rule matches.
default: "allow"
group_profile:
description: >
- Security profile group that is already defined in the system. This property supersedes antivirus,
vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties.
antivirus:
description:
- Name of the already defined antivirus profile.
vulnerability:
description:
- Name of the already defined vulnerability profile.
spyware:
description:
- Name of the already defined spyware profile.
url_filtering:
description:
- Name of the already defined url_filtering profile.
file_blocking:
description:
- Name of the already defined file_blocking profile.
data_filtering:
description:
- Name of the already defined data_filtering profile.
wildfire_analysis:
description:
- Name of the already defined wildfire_analysis profile.
devicegroup:
description: >
- Device groups are used for the Panorama interaction with Firewall(s). The group must exist on Panorama.
If the device group is not defined, we assume that we are contacting a Firewall.
commit:
description:
- Commit configuration if changed.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- name: add an SSH inbound rule to devicegroup
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'SSH permit'
description: 'SSH rule test'
tag_name: ['ProjectX']
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
hip_profiles: ['any']
action: 'allow'
devicegroup: 'Cloud Edge'
- name: add a rule to allow HTTP multimedia only from CDNs
panos_security_rule:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
operation: 'add'
rule_name: 'HTTP Multimedia'
description: 'Allow HTTP multimedia only to host at 1.1.1.1'
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['content-delivery-networks']
application: ['http-video', 'http-audio']
service: ['service-http', 'service-https']
hip_profiles: ['any']
action: 'allow'
- name: add a more complex rule that uses security profiles
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'Allow HTTP w profile'
log_start: false
log_end: true
action: 'allow'
antivirus: 'default'
vulnerability: 'default'
spyware: 'default'
url_filtering: 'default'
wildfire_analysis: 'default'
- name: delete a devicegroup security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
rule_name: 'Allow telnet'
devicegroup: 'DC Firewalls'
- name: find a specific security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
password: '{{ password }}'
operation: 'find'
rule_name: 'Allow RDP to DCs'
register: result
- debug: msg='{{result.stdout_lines}}'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pandevice import policies
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def get_rulebase(device, devicegroup):
# Build the rulebase
if isinstance(device, pandevice.firewall.Firewall):
rulebase = pandevice.policies.Rulebase()
device.add(rulebase)
elif isinstance(device, pandevice.panorama.Panorama):
dg = panorama.DeviceGroup(devicegroup)
device.add(dg)
rulebase = policies.PreRulebase()
dg.add(rulebase)
else:
return False
policies.SecurityRule.refreshall(rulebase)
return rulebase
def find_rule(rulebase, rule_name):
# Search for the rule name
rule = rulebase.find(rule_name)
if rule:
return rule
else:
return False
def rule_is_match(propose_rule, current_rule):
match_check = ['name', 'description', 'group_profile', 'antivirus', 'vulnerability',
'spyware', 'url_filtering', 'file_blocking', 'data_filtering',
'wildfire_analysis', 'type', 'action', 'tag', 'log_start', 'log_end']
list_check = ['tozone', 'fromzone', 'source', 'source_user', 'destination', 'category',
'application', 'service', 'hip_profiles']
for check in match_check:
propose_check = getattr(propose_rule, check, None)
current_check = getattr(current_rule, check, None)
if propose_check != current_check:
return False
for check in list_check:
propose_check = getattr(propose_rule, check, [])
current_check = getattr(current_rule, check, [])
if set(propose_check) != set(current_check):
return False
return True
def create_security_rule(**kwargs):
security_rule = policies.SecurityRule(
name=kwargs['rule_name'],
description=kwargs['description'],
fromzone=kwargs['source_zone'],
source=kwargs['source_ip'],
source_user=kwargs['source_user'],
hip_profiles=kwargs['hip_profiles'],
tozone=kwargs['destination_zone'],
destination=kwargs['destination_ip'],
application=kwargs['application'],
service=kwargs['service'],
category=kwargs['category'],
log_start=kwargs['log_start'],
log_end=kwargs['log_end'],
action=kwargs['action'],
type=kwargs['rule_type']
)
if 'tag_name' in kwargs:
security_rule.tag = kwargs['tag_name']
# profile settings
if 'group_profile' in kwargs:
security_rule.group = kwargs['group_profile']
else:
if 'antivirus' in kwargs:
security_rule.virus = kwargs['antivirus']
if 'vulnerability' in kwargs:
security_rule.vulnerability = kwargs['vulnerability']
if 'spyware' in kwargs:
security_rule.spyware = kwargs['spyware']
if 'url_filtering' in kwargs:
security_rule.url_filtering = kwargs['url_filtering']
if 'file_blocking' in kwargs:
security_rule.file_blocking = kwargs['file_blocking']
if 'data_filtering' in kwargs:
security_rule.data_filtering = kwargs['data_filtering']
if 'wildfire_analysis' in kwargs:
security_rule.wildfire_analysis = kwargs['wildfire_analysis']
return security_rule
def add_rule(rulebase, sec_rule):
if rulebase:
rulebase.add(sec_rule)
sec_rule.create()
return True
else:
return False
def update_rule(rulebase, nat_rule):
if rulebase:
rulebase.add(nat_rule)
nat_rule.apply()
return True
else:
return False
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(default='add', choices=['add', 'update', 'delete', 'find']),
rule_name=dict(required=True),
description=dict(default=''),
tag_name=dict(type='list'),
destination_zone=dict(type='list', default=['any']),
source_zone=dict(type='list', default=['any']),
source_ip=dict(type='list', default=["any"]),
source_user=dict(type='list', default=['any']),
destination_ip=dict(type='list', default=["any"]),
category=dict(type='list', default=['any']),
application=dict(type='list', default=['any']),
service=dict(type='list', default=['application-default']),
hip_profiles=dict(type='list', default=['any']),
group_profile=dict(),
antivirus=dict(),
vulnerability=dict(),
spyware=dict(),
url_filtering=dict(),
file_blocking=dict(),
data_filtering=dict(),
wildfire_analysis=dict(),
log_start=dict(type='bool', default=False),
log_end=dict(type='bool', default=True),
rule_type=dict(default='universal'),
action=dict(default='allow'),
devicegroup=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']])
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
rule_name = module.params['rule_name']
description = module.params['description']
tag_name = module.params['tag_name']
source_zone = module.params['source_zone']
source_ip = module.params['source_ip']
source_user = module.params['source_user']
hip_profiles = module.params['hip_profiles']
destination_zone = module.params['destination_zone']
destination_ip = module.params['destination_ip']
application = module.params['application']
service = module.params['service']
category = module.params['category']
log_start = module.params['log_start']
log_end = module.params['log_end']
action = module.params['action']
group_profile = module.params['group_profile']
antivirus = module.params['antivirus']
vulnerability = module.params['vulnerability']
spyware = module.params['spyware']
url_filtering = module.params['url_filtering']
file_blocking = module.params['file_blocking']
data_filtering = module.params['data_filtering']
wildfire_analysis = module.params['wildfire_analysis']
rule_type = module.params['rule_type']
devicegroup = module.params['devicegroup']
commit = module.params['commit']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
# Get the rulebase
rulebase = get_rulebase(device, dev_group)
# Which action shall we take on the object?
if operation == "find":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Rule matched'
)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "delete":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, delete it
if match:
try:
if commit:
match.delete()
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "add":
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
# Search for the rule. Fail if found.
match = find_rule(rulebase, rule_name)
if match:
if rule_is_match(match, new_rule):
module.exit_json(changed=False, msg='Rule \'%s\' is already in place' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name)
else:
try:
changed = add_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=changed, msg='Rule \'%s\' successfully added' % rule_name)
elif operation == 'update':
# Search for the rule. Update if found.
match = find_rule(rulebase, rule_name)
if match:
try:
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
changed = update_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name)
if __name__ == '__main__':
main()
| gpl-3.0 |
Jai-Chaudhary/termite-data-server | web2py/scripts/tickets2db.py | 38 | 1391 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import time
import stat
import datetime
from gluon.utils import md5_hash
from gluon.restricted import RestrictedError, TicketStorage
from gluon import DAL
SLEEP_MINUTES = 5
errors_path = os.path.join(request.folder, 'errors')
try:
db_string = open(os.path.join(request.folder, 'private', 'ticket_storage.txt')).read().replace('\r', '').replace('\n', '').strip()
except:
db_string = 'sqlite://storage.db'
db_path = os.path.join(request.folder, 'databases')
tk_db = DAL(db_string, folder=db_path, auto_import=True)
ts = TicketStorage(db=tk_db)
tk_table = ts._get_table(
db=tk_db, tablename=ts.tablename, app=request.application)
hashes = {}
while 1:
if request.tickets_db:
print "You're already storing tickets in the database"
sys.exit(1)
for file in os.listdir(errors_path):
filename = os.path.join(errors_path, file)
modified_time = os.stat(filename)[stat.ST_MTIME]
modified_time = datetime.datetime.fromtimestamp(modified_time)
ticket_id = file
ticket_data = open(filename).read()
tk_table.insert(ticket_id=ticket_id,
ticket_data=ticket_data,
created_datetime=modified_time
)
tk_db.commit()
os.unlink(filename)
time.sleep(SLEEP_MINUTES * 60)
| bsd-3-clause |
proxysh/Safejumper-for-Mac | buildlinux/env32/lib/python2.7/site-packages/cffi/cffi_opcode.py | 64 | 5477 |
class CffiOp(object):
def __init__(self, op, arg):
self.op = op
self.arg = arg
def as_c_expr(self):
if self.op is None:
assert isinstance(self.arg, str)
return '(_cffi_opcode_t)(%s)' % (self.arg,)
classname = CLASS_NAME[self.op]
return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
def as_python_bytes(self):
if self.op is None and self.arg.isdigit():
value = int(self.arg) # non-negative: '-' not in self.arg
if value >= 2**31:
raise OverflowError("cannot emit %r: limited to 2**31-1"
% (self.arg,))
return format_four_bytes(value)
if isinstance(self.arg, str):
from .ffiplatform import VerificationError
raise VerificationError("cannot emit to Python: %r" % (self.arg,))
return format_four_bytes((self.arg << 8) | self.op)
def __str__(self):
classname = CLASS_NAME.get(self.op, self.op)
return '(%s %s)' % (classname, self.arg)
def format_four_bytes(num):
return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
(num >> 24) & 0xFF,
(num >> 16) & 0xFF,
(num >> 8) & 0xFF,
(num ) & 0xFF)
OP_PRIMITIVE = 1
OP_POINTER = 3
OP_ARRAY = 5
OP_OPEN_ARRAY = 7
OP_STRUCT_UNION = 9
OP_ENUM = 11
OP_FUNCTION = 13
OP_FUNCTION_END = 15
OP_NOOP = 17
OP_BITFIELD = 19
OP_TYPENAME = 21
OP_CPYTHON_BLTN_V = 23 # varargs
OP_CPYTHON_BLTN_N = 25 # noargs
OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg)
OP_CONSTANT = 29
OP_CONSTANT_INT = 31
OP_GLOBAL_VAR = 33
OP_DLOPEN_FUNC = 35
OP_DLOPEN_CONST = 37
OP_GLOBAL_VAR_F = 39
OP_EXTERN_PYTHON = 41
PRIM_VOID = 0
PRIM_BOOL = 1
PRIM_CHAR = 2
PRIM_SCHAR = 3
PRIM_UCHAR = 4
PRIM_SHORT = 5
PRIM_USHORT = 6
PRIM_INT = 7
PRIM_UINT = 8
PRIM_LONG = 9
PRIM_ULONG = 10
PRIM_LONGLONG = 11
PRIM_ULONGLONG = 12
PRIM_FLOAT = 13
PRIM_DOUBLE = 14
PRIM_LONGDOUBLE = 15
PRIM_WCHAR = 16
PRIM_INT8 = 17
PRIM_UINT8 = 18
PRIM_INT16 = 19
PRIM_UINT16 = 20
PRIM_INT32 = 21
PRIM_UINT32 = 22
PRIM_INT64 = 23
PRIM_UINT64 = 24
PRIM_INTPTR = 25
PRIM_UINTPTR = 26
PRIM_PTRDIFF = 27
PRIM_SIZE = 28
PRIM_SSIZE = 29
PRIM_INT_LEAST8 = 30
PRIM_UINT_LEAST8 = 31
PRIM_INT_LEAST16 = 32
PRIM_UINT_LEAST16 = 33
PRIM_INT_LEAST32 = 34
PRIM_UINT_LEAST32 = 35
PRIM_INT_LEAST64 = 36
PRIM_UINT_LEAST64 = 37
PRIM_INT_FAST8 = 38
PRIM_UINT_FAST8 = 39
PRIM_INT_FAST16 = 40
PRIM_UINT_FAST16 = 41
PRIM_INT_FAST32 = 42
PRIM_UINT_FAST32 = 43
PRIM_INT_FAST64 = 44
PRIM_UINT_FAST64 = 45
PRIM_INTMAX = 46
PRIM_UINTMAX = 47
_NUM_PRIM = 48
_UNKNOWN_PRIM = -1
_UNKNOWN_FLOAT_PRIM = -2
_UNKNOWN_LONG_DOUBLE = -3
_IO_FILE_STRUCT = -1
PRIMITIVE_TO_INDEX = {
'char': PRIM_CHAR,
'short': PRIM_SHORT,
'int': PRIM_INT,
'long': PRIM_LONG,
'long long': PRIM_LONGLONG,
'signed char': PRIM_SCHAR,
'unsigned char': PRIM_UCHAR,
'unsigned short': PRIM_USHORT,
'unsigned int': PRIM_UINT,
'unsigned long': PRIM_ULONG,
'unsigned long long': PRIM_ULONGLONG,
'float': PRIM_FLOAT,
'double': PRIM_DOUBLE,
'long double': PRIM_LONGDOUBLE,
'_Bool': PRIM_BOOL,
'wchar_t': PRIM_WCHAR,
'int8_t': PRIM_INT8,
'uint8_t': PRIM_UINT8,
'int16_t': PRIM_INT16,
'uint16_t': PRIM_UINT16,
'int32_t': PRIM_INT32,
'uint32_t': PRIM_UINT32,
'int64_t': PRIM_INT64,
'uint64_t': PRIM_UINT64,
'intptr_t': PRIM_INTPTR,
'uintptr_t': PRIM_UINTPTR,
'ptrdiff_t': PRIM_PTRDIFF,
'size_t': PRIM_SIZE,
'ssize_t': PRIM_SSIZE,
'int_least8_t': PRIM_INT_LEAST8,
'uint_least8_t': PRIM_UINT_LEAST8,
'int_least16_t': PRIM_INT_LEAST16,
'uint_least16_t': PRIM_UINT_LEAST16,
'int_least32_t': PRIM_INT_LEAST32,
'uint_least32_t': PRIM_UINT_LEAST32,
'int_least64_t': PRIM_INT_LEAST64,
'uint_least64_t': PRIM_UINT_LEAST64,
'int_fast8_t': PRIM_INT_FAST8,
'uint_fast8_t': PRIM_UINT_FAST8,
'int_fast16_t': PRIM_INT_FAST16,
'uint_fast16_t': PRIM_UINT_FAST16,
'int_fast32_t': PRIM_INT_FAST32,
'uint_fast32_t': PRIM_UINT_FAST32,
'int_fast64_t': PRIM_INT_FAST64,
'uint_fast64_t': PRIM_UINT_FAST64,
'intmax_t': PRIM_INTMAX,
'uintmax_t': PRIM_UINTMAX,
}
F_UNION = 0x01
F_CHECK_FIELDS = 0x02
F_PACKED = 0x04
F_EXTERNAL = 0x08
F_OPAQUE = 0x10
G_FLAGS = dict([('_CFFI_' + _key, globals()[_key])
for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED',
'F_EXTERNAL', 'F_OPAQUE']])
CLASS_NAME = {}
for _name, _value in list(globals().items()):
if _name.startswith('OP_') and isinstance(_value, int):
CLASS_NAME[_value] = _name[3:]
| gpl-2.0 |
buckket/weltklang | lib/rfk/xmpp_bot.py | 4 | 3793 | import ast
import sys
import logging
import click
from redis import StrictRedis
from redis.exceptions import ConnectionError
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import XMPPError
import rfk
from rfk.helper import get_path
import rfk.helper.daemonize
class RfKBot(ClientXMPP):
def __init__(self, jid, password):
super(RfKBot, self).__init__(jid, password)
self.add_event_handler('session_start', self.start)
self.register_plugin('xep_0004') # Data Forms
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0060') # Publish-Subscribe
self.register_plugin('xep_0115') # Entity Capabilities
self.register_plugin('xep_0118') # User Tune
self.register_plugin('xep_0128') # Service Discovery Extensions
self.register_plugin('xep_0163') # Personal Eventing Protocol
self.register_plugin('xep_0199') # XMPP Ping
self.auto_authorize = True
self.auto_subscribe = True
def start(self, event):
self.send_presence()
self.get_roster()
self['xep_0115'].update_caps()
def send_messages(self, data):
try:
for recipient in data['recipients']:
logging.info('Sending message to {}'.format(recipient))
self.send_message(recipient, data['message'])
return True
except (KeyError, XMPPError):
return False
def update_tune(self, data):
try:
if data['tune']:
(artist, title) = (data['tune']['artist'], data['tune']['title'])
logging.info('Updating tune: {} - {}'.format(artist, title))
self['xep_0118'].publish_tune(artist=artist, title=title)
else:
logging.info('Updating tune: None')
self['xep_0118'].stop()
return True
except (KeyError, XMPPError):
return False
@click.command()
@click.option('-j', '--jid', help='JID to use')
@click.option('-p', '--password', help='password to use', hide_input=True)
@click.option('-f', '--foreground', help='run in foreground', is_flag=True, default=False)
def main(jid, password, foreground):
rfk.init(enable_geoip=False)
if not jid:
jid = rfk.CONFIG.get('xmpp', 'jid')
if not password:
password = rfk.CONFIG.get('xmpp', 'password')
if not foreground:
rfk.helper.daemonize.createDaemon(get_path())
# Setup logging
logging.basicConfig(level=logging.INFO, format='%(levelname)-8s %(message)s')
# Setup XMPP instance
xmpp = RfKBot(jid, password)
# Connect to the XMPP server and start processing XMPP stanzas
if xmpp.connect():
xmpp.process(block=False)
def message_handler(message):
if message and message['type'] == 'message':
data = ast.literal_eval(message['data'])
try:
if data['type'] == 'message':
xmpp.send_messages(data)
elif data['type'] == 'tune':
xmpp.update_tune(data)
except (KeyError, TypeError) as err:
logging.error('message_handler error: {}'.format(err))
try:
redis_client = StrictRedis(host='localhost', port=6379, decode_responses=True)
redis_pubsub = redis_client.pubsub(ignore_subscribe_messages=True)
redis_pubsub.subscribe('rfk-xmpp')
for message in redis_pubsub.listen():
message_handler(message)
except (ConnectionError, KeyboardInterrupt):
xmpp.disconnect(wait=True)
return False
else:
return False
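# --- Illustrative sketch only (not part of the original module) ---
# message_handler() above decodes the redis payload with ast.literal_eval, so any
# other process can drive the bot by publishing a repr()'d dict to the 'rfk-xmpp'
# channel. The JID, message text and track below are placeholder values.
def _example_publisher(redis_client):
    """Sketch of a producer for the 'rfk-xmpp' channel; never called from this module."""
    redis_client.publish('rfk-xmpp', repr({
        'type': 'message',
        'recipients': ['[email protected]'],
        'message': 'Stream is live!',
    }))
    redis_client.publish('rfk-xmpp', repr({
        'type': 'tune',
        'tune': {'artist': 'Some Artist', 'title': 'Some Track'},
    }))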
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
kobolabs/calibre | src/calibre/ebooks/pdb/header.py | 24 | 2801 | # -*- coding: utf-8 -*-
'''
Read the header data from a pdb file.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import re
import struct
import time
class PdbHeaderReader(object):
def __init__(self, stream):
self.stream = stream
self.ident = self.identity()
self.num_sections = self.section_count()
self.title = self.name()
def identity(self):
self.stream.seek(60)
ident = self.stream.read(8)
return ident
def section_count(self):
self.stream.seek(76)
return struct.unpack('>H', self.stream.read(2))[0]
def name(self):
self.stream.seek(0)
return re.sub('[^-A-Za-z0-9 ]+', '_', self.stream.read(32).replace('\x00', ''))
def full_section_info(self, number):
if number not in range(0, self.num_sections):
raise ValueError('Not a valid section number %i' % number)
self.stream.seek(78 + number * 8)
        offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', self.stream.read(8))
flags, val = a1, a2 << 16 | a3 << 8 | a4
return (offset, flags, val)
def section_offset(self, number):
if number not in range(0, self.num_sections):
raise ValueError('Not a valid section number %i' % number)
self.stream.seek(78 + number * 8)
return struct.unpack('>LBBBB', self.stream.read(8))[0]
def section_data(self, number):
if number not in range(0, self.num_sections):
raise ValueError('Not a valid section number %i' % number)
start = self.section_offset(number)
if number == self.num_sections -1:
self.stream.seek(0, 2)
end = self.stream.tell()
else:
end = self.section_offset(number + 1)
self.stream.seek(start)
return self.stream.read(end - start)
class PdbHeaderBuilder(object):
def __init__(self, identity, title):
        self.identity = identity.ljust(8, '\x00')[:8]
self.title = '%s\x00' % re.sub('[^-A-Za-z0-9 ]+', '_', title).ljust(31, '\x00')[:31].encode('ascii', 'replace')
def build_header(self, section_lengths, out_stream):
'''
        section_lengths = Length of each section in the file.
'''
now = int(time.time())
nrecords = len(section_lengths)
out_stream.write(self.title + struct.pack('>HHIIIIII', 0, 0, now, now, 0, 0, 0, 0))
out_stream.write(self.identity + struct.pack('>IIH', nrecords, 0, nrecords))
offset = 78 + (8 * nrecords) + 2
for id, record in enumerate(section_lengths):
out_stream.write(struct.pack('>LBBBB', long(offset), 0, 0, 0, 0))
offset += record
out_stream.write('\x00\x00')
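# Illustrative sketch only (not part of the original module): how the reader and
# builder above fit together. The input path and output stream come from a
# hypothetical caller; error handling is omitted.
def _example_roundtrip(in_path, out_stream):
    with open(in_path, 'rb') as stream:
        reader = PdbHeaderReader(stream)
        lengths = [len(reader.section_data(i)) for i in range(reader.num_sections)]
    builder = PdbHeaderBuilder(reader.ident, reader.title)
    builder.build_header(lengths, out_stream)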
| gpl-3.0 |
sventech/YAK-server | test_project/test_app/tests/test_social.py | 1 | 10812 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from test_project.test_app.models import Post
from test_project.test_app.tests.factories import UserFactory, PostFactory, CommentFactory
from yak.rest_core.test import SchemaTestCase
from yak.rest_social_network.models import Follow, Comment, Tag, Like
User = get_user_model()
class BaseAPITests(SchemaTestCase):
def setUp(self):
super(BaseAPITests, self).setUp()
self.dev_user = UserFactory()
class FlagTestCase(BaseAPITests):
def test_users_can_flag_content(self):
test_user = UserFactory()
content_type = ContentType.objects.get_for_model(Post)
flag_url = reverse('flag')
data = {
'content_type': content_type.pk,
'object_id': PostFactory().pk
}
self.assertSchemaPost(flag_url, "$flagRequest", "$flagResponse", data, test_user)
class ShareTestCase(BaseAPITests):
def test_users_can_share_content(self):
test_user = UserFactory()
content_type = ContentType.objects.get_for_model(Post)
shares_url = reverse('shares-list')
data = {
'content_type': content_type.pk,
'object_id': PostFactory().pk,
'shared_with': [test_user.pk]
}
self.assertSchemaPost(shares_url, "$shareRequest", "$shareResponse", data, self.dev_user)
def test_users_can_share_content_multiple_times(self):
sharing_user = UserFactory()
test_user = UserFactory()
content_type = ContentType.objects.get_for_model(Post)
shares_url = reverse('shares-list')
data = {
'content_type': content_type.pk,
'object_id': PostFactory().pk,
'shared_with': [test_user.pk]
}
self.assertSchemaPost(shares_url, "$shareRequest", "$shareResponse", data, sharing_user)
data['shared_with'] = [self.dev_user.pk]
self.assertSchemaPost(shares_url, "$shareRequest", "$shareResponse", data, sharing_user)
class LikeTestCase(BaseAPITests):
def test_users_can_like_content(self):
content_type = ContentType.objects.get_for_model(Post)
likes_url = reverse('likes-list')
data = {
'content_type': content_type.pk,
'object_id': PostFactory().pk,
}
self.assertSchemaPost(likes_url, "$likeRequest", "$likeResponse", data, self.dev_user)
def test_liked_mixin(self):
post = PostFactory()
url = reverse("posts-detail", args=[post.pk])
like = Like.objects.create(content_type=ContentType.objects.get_for_model(Post), object_id=post.pk,
user=self.dev_user)
response = self.assertSchemaGet(url, None, "$postResponse", self.dev_user)
self.assertEqual(response.data["liked_id"], like.pk)
other_post = PostFactory()
url = reverse("posts-detail", args=[other_post.pk])
response = self.assertSchemaGet(url, None, "$postResponse", self.dev_user)
self.assertIsNone(response.data["liked_id"])
class CommentTestCase(BaseAPITests):
def test_users_can_comment_on_content(self):
content_type = ContentType.objects.get_for_model(Post)
comments_url = reverse('comments-list')
data = {
'content_type': content_type.pk,
'object_id': PostFactory().pk,
'description': 'This is a user comment.'
}
self.assertSchemaPost(comments_url, "$commentRequest", "$commentResponse", data, self.dev_user)
def test_comment_related_tags(self):
content_type = ContentType.objects.get_for_model(Post)
Comment.objects.create(content_type=content_type,
object_id=1,
description='Testing of a hashtag. #django',
user=self.dev_user)
tags_url = reverse('tags-list')
response = self.assertSchemaGet(tags_url, None, "$tagResponse", self.dev_user)
self.assertEqual(response.data['results'][0]['name'], 'django')
self.assertIsNotNone(Tag.objects.get(name='django'))
def test_comments_for_specific_object(self):
test_user = UserFactory()
post_content_type = ContentType.objects.get_for_model(Post)
post = PostFactory(user=test_user)
comment = CommentFactory(content_type=post_content_type, object_id=post.pk)
post2 = PostFactory(user=test_user)
CommentFactory(content_type=post_content_type, object_id=post2.pk)
url = reverse('comments-list')
parameters = {
'content_type': post_content_type.pk,
'object_id': post.pk,
}
response = self.assertSchemaGet(url, parameters, "$commentResponse", self.dev_user)
self.assertEqual(len(response.data["results"]), 1)
self.assertEqual(response.data["results"][0]["id"], comment.pk)
class UserFollowingTestCase(BaseAPITests):
def test_user_can_follow_each_other(self):
test_user1 = UserFactory()
user_content_type = ContentType.objects.get_for_model(User)
follow_url = reverse('follows-list')
# Dev User to follow Test User 1
data = {
'content_type': user_content_type.pk,
'object_id': test_user1.pk
}
response = self.assertSchemaPost(follow_url, "$followRequest", "$followResponse", data, self.dev_user)
self.assertEqual(response.data['following']['username'], test_user1.username)
def test_following_endpoint(self):
test_user1 = UserFactory()
test_user2 = UserFactory()
user_content_type = ContentType.objects.get_for_model(User)
# Dev User to follow User 1, User 2 to follow Dev User
Follow.objects.create(content_type=user_content_type, object_id=test_user1.pk, user=self.dev_user)
Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=test_user2)
following_url = reverse('users-following', args=[self.dev_user.pk])
response = self.assertSchemaGet(following_url, None, "$followResponse", self.dev_user)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['following']['username'], test_user1.username)
def test_follower_endpoint(self):
test_user1 = UserFactory()
test_user2 = UserFactory()
user_content_type = ContentType.objects.get_for_model(User)
# Dev User to follow User 1, User 2 to follow Dev User
Follow.objects.create(content_type=user_content_type, object_id=test_user1.pk, user=self.dev_user)
Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=test_user2)
followers_url = reverse('users-followers', args=[self.dev_user.pk])
response = self.assertSchemaGet(followers_url, None, "$followResponse", self.dev_user)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['follower']['username'], test_user2.username)
def test_follow_pagination(self):
user_content_type = ContentType.objects.get_for_model(User)
for _ in range(0, 30):
user = UserFactory()
Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=user)
followers_url = reverse('users-followers', args=[self.dev_user.pk])
response = self.assertSchemaGet(followers_url, None, "$followResponse", self.dev_user)
self.assertEqual(len(response.data), settings.REST_FRAMEWORK['PAGE_SIZE'])
response = self.assertSchemaGet(followers_url, {"page": 2}, "$followResponse", self.dev_user)
self.assertEqual(len(response.data), 30 - settings.REST_FRAMEWORK['PAGE_SIZE'])
def test_user_can_unfollow_user(self):
follower = UserFactory()
user_content_type = ContentType.objects.get_for_model(User)
follow_object = Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower)
follows_url = reverse('follows-detail', kwargs={'pk': follow_object.pk})
# If you are not the follower of the user, you cannot unfollow the user
self.assertSchemaDelete(follows_url, self.dev_user, unauthorized=True)
# If you are the follower of that user, you can unfollow the user
self.assertSchemaDelete(follows_url, follower)
# Check that original follow object no longer exists
self.assertEqual(Follow.objects.filter(pk=follow_object.pk).exists(), False)
def test_user_following_and_follower_count(self):
follower1 = UserFactory()
follower2 = UserFactory()
following = UserFactory()
user_content_type = ContentType.objects.get_for_model(User)
# Follower setup
Follow.objects.create(content_type=user_content_type, object_id=following.pk, user=self.dev_user)
Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower1)
Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower2)
users_url = reverse('users-detail', kwargs={'pk': self.dev_user.pk})
response = self.assertSchemaGet(users_url, None, "$userResponse", self.dev_user)
self.assertEqual(response.data['user_following_count'], 1)
self.assertEqual(response.data['user_followers_count'], 2)
def test_bulk_follow(self):
user1 = UserFactory()
user2 = UserFactory()
url = reverse('follows-bulk-create')
user_content_type = ContentType.objects.get_for_model(User)
data = [
{'content_type': user_content_type.pk, 'object_id': user1.pk},
{'content_type': user_content_type.pk, 'object_id': user2.pk}
]
self.assertSchemaPost(url, "$followRequest", "$followResponse", data, self.dev_user)
self.assertEqual(user1.user_followers_count(), 1)
self.assertEqual(user2.user_followers_count(), 1)
def test_follow_id(self):
follower = UserFactory()
user_content_type = ContentType.objects.get_for_model(User)
follow_object = Follow.objects.create(content_type=user_content_type, object_id=self.dev_user.pk, user=follower)
url = reverse("users-detail", args=[self.dev_user.pk])
response = self.assertSchemaGet(url, None, "$userResponse", follower)
self.assertEqual(response.data['follow_id'], follow_object.pk)
not_follower = UserFactory()
url = reverse("users-detail", args=[self.dev_user.pk])
response = self.assertSchemaGet(url, None, "$userResponse", not_follower)
self.assertIsNone(response.data['follow_id'])
| mit |
jwhonce/openshift-ansible | roles/lib_utils/library/oo_azure_rm_publish_image.py | 3 | 10040 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# Copyright 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function # noqa: F401
# import httplib
import json
import os
import time
import requests
from ansible.module_utils.basic import AnsibleModule
class AzurePublisherException(Exception):
'''Exception class for AzurePublisher'''
pass
class AzurePublisher(object):
'''Python class to represent the Azure Publishing portal https://cloudpartner.azure.com'''
# pylint: disable=too-many-arguments
def __init__(self,
publisher_id,
client_info,
ssl_verify=True,
api_version='2017-10-31',
debug=False):
'''
:publisher_id: string of the publisher id
:client_info: a dict containing the client_id, client_secret to get an access_token
'''
self._azure_server = 'https://cloudpartner.azure.com/api/publishers/{}'.format(publisher_id)
self.client_info = client_info
self.ssl_verify = ssl_verify
self.api_version = 'api-version={}'.format(api_version)
self.debug = debug
# if self.debug:
# import httplib
# httplib.HTTPSConnection.debuglevel = 1
# httplib.HTTPConnection.debuglevel = 1
self._access_token = None
@property
def server(self):
'''property for server url'''
return self._azure_server
@property
def token(self):
'''property for the access_token
curl --data-urlencode "client_id=$AZURE_CLIENT_ID" \
--data-urlencode "client_secret=$AZURE_CLIENT_SECRET" \
--data-urlencode "grant_type=client_credentials" \
--data-urlencode "resource=https://cloudpartner.azure.com" \
https://login.microsoftonline.com/$AZURE_TENANT_ID/oauth2/token
'''
if self._access_token is None:
url = 'https://login.microsoftonline.com/{}/oauth2/token'.format(self.client_info['tenant_id'])
data = {
'client_id': {self.client_info['client_id']},
'client_secret': self.client_info['client_secret'],
'grant_type': 'client_credentials',
'resource': 'https://cloudpartner.azure.com'
}
results = AzurePublisher.request('POST', url, data, {})
jres = results.json()
self._access_token = jres['access_token']
return self._access_token
def get_offers(self, offer=None, version=None, slot='preview'):
''' fetch all offers by publisherid '''
url = '/offers'
if offer is not None:
url += '/{}'.format(offer)
if version is not None:
url += '/versions/{}'.format(version)
if slot == 'preview':
url += '/slot/{}'.format(slot)
url += '?{}'.format(self.api_version)
return self.prepare_action(url)
def get_operations(self, offer, operation=None, status=None):
''' create or modify an offer '''
url = '/offers/{0}/submissions'.format(offer)
if operation is not None:
url += '/operations/{0}'.format(operation)
if not url.endswith('/'):
url += '/'
url += '?{0}'.format(self.api_version)
if status is not None:
url += '&status={0}'.format(status)
return self.prepare_action(url, 'GET')
def cancel_operation(self, offer):
''' create or modify an offer '''
url = '/offers/{0}/cancel?{1}'.format(offer, self.api_version)
return self.prepare_action(url, 'POST')
def publish(self, offer, emails):
''' publish an offer '''
url = '/offers/{0}/publish?{1}'.format(offer, self.api_version)
data = {
'metadata': {
'notification-emails': ','.join(emails),
}
}
return self.prepare_action(url, 'POST', data=data)
def go_live(self, offer):
''' create or modify an offer '''
url = '/offers/{0}/golive?{1}'.format(offer, self.api_version)
return self.prepare_action(url, 'POST')
def create_or_modify_offer(self, offer, data=None, modify=False):
''' create or modify an offer '''
url = '/offers/{0}?{1}'.format(offer, self.api_version)
headers = None
if modify:
headers = {
'If-Match': '*',
}
return self.prepare_action(url, 'PUT', data=data, add_headers=headers)
def prepare_action(self, url, action='GET', data=None, add_headers=None):
'''perform the http request
:action: string of either GET|POST
'''
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer {}'.format(self.token)
}
if add_headers is not None:
headers.update(add_headers)
if data is None:
data = ''
else:
data = json.dumps(data)
return AzurePublisher.request(action.upper(), self.server + url, data, headers)
def cancel_and_wait_for_operation(self, params):
'''cancel the current publish operation and wait for operation to complete'''
# cancel the publish operation
self.cancel_operation(offer=params['offer'])
# we need to wait here for 'submissionState' to move to 'canceled'
while True:
# fetch operations
ops = self.get_operations(params['offer'])
if self.debug:
print(ops.json())
if ops.json()[0]['submissionState'] == 'canceled':
break
time.sleep(5)
return ops
def manage_offer(self, params):
''' handle creating or modifying offers'''
# fetch the offer to verify it exists:
results = self.get_offers(offer=params['offer'])
if results.status_code == 200 and params['force']:
return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'], modify=True)
return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'])
@staticmethod
def request(action, url, data=None, headers=None, ssl_verify=True):
req = requests.Request(action.upper(), url, data=data, headers=headers)
session = requests.Session()
req_prep = session.prepare_request(req)
response = session.send(req_prep, verify=ssl_verify)
return response
@staticmethod
def run_ansible(params):
'''perform the ansible operations'''
client_info = {
'tenant_id': params['tenant_id'],
'client_id': params['client_id'],
'client_secret': params['client_secret']}
apc = AzurePublisher(params['publisher'],
client_info,
debug=params['debug'])
if params['state'] == 'offer':
results = apc.manage_offer(params)
elif params['state'] == 'publish':
results = apc.publish(offer=params['offer'], emails=params['emails'])
results.json = lambda: ''
elif params['state'] == 'cancel_op':
results = apc.cancel_and_wait_for_operation(params)
elif params['state'] == 'go_live':
results = apc.go_live(offer=params['offer'])
else:
raise AzurePublisherException('Unsupported query type: {}'.format(params['state']))
changed = False
if results.status_code in [200, 201, 202]:
changed = True
return {'data': results.json(), 'changed': changed, 'status_code': results.status_code}
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='offer', choices=['offer', 'cancel_op', 'go_live', 'publish']),
force=dict(default=False, type='bool'),
publisher=dict(default='redhat', type='str'),
debug=dict(default=False, type='bool'),
tenant_id=dict(default=os.environ.get('AZURE_TENANT_ID'), type='str'),
client_id=dict(default=os.environ.get('AZURE_CLIENT_ID'), type='str'),
client_secret=dict(default=os.environ.get('AZURE_CLIENT_SECRET'), type='str'),
offer=dict(default=None, type='str'),
offer_data=dict(default=None, type='dict'),
emails=dict(default=None, type='list'),
),
required_if=[
["state", "offer", ["offer_data"]],
],
)
    # Verify that we received the required Azure credentials, either as module
    # arguments or from the AZURE_* environment variables. A valid value is one
    # that is neither None nor ''.
if (module.params['tenant_id'] is None or module.params['client_id'] is None or
module.params['client_secret'] is None):
return module.fail_json(**{'failed': True,
'msg': 'Please specify tenant_id, client_id, and client_secret'})
rval = AzurePublisher.run_ansible(module.params)
if int(rval['status_code']) >= 300:
rval['msg'] = 'Failed. status_code {}'.format(rval['status_code'])
return module.fail_json(**rval)
return module.exit_json(**rval)
if __name__ == '__main__':
main()
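# Illustrative example of calling this module from a playbook (not part of the
# original file). The offer name and e-mail address are placeholders, and the
# Azure credentials are assumed to come from the AZURE_* environment variables:
#
#   - name: publish the image offer
#     oo_azure_rm_publish_image:
#       state: publish
#       publisher: redhat
#       offer: osa
#       emails:
#       - [email protected]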
| apache-2.0 |
asimshankar/tensorflow | tensorflow/python/debug/cli/base_ui.py | 89 | 7715 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Class of TensorFlow Debugger (tfdbg) Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
class BaseUI(object):
"""Base class of tfdbg user interface."""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
ERROR_MESSAGE_PREFIX = "ERROR: "
INFO_MESSAGE_PREFIX = "INFO: "
def __init__(self, on_ui_exit=None, config=None):
"""Constructor of the base class.
Args:
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations.
"""
self._on_ui_exit = on_ui_exit
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._config = config or cli_config.CLIConfig()
self._config_argparser = argparse.ArgumentParser(
description="config command", usage=argparse.SUPPRESS)
subparsers = self._config_argparser.add_subparsers()
set_parser = subparsers.add_parser("set")
set_parser.add_argument("property_name", type=str)
set_parser.add_argument("property_value", type=str)
set_parser = subparsers.add_parser("show")
self.register_command_handler(
"config",
self._config_command_handler,
self._config_argparser.format_help(),
prefix_aliases=["cfg"])
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
help_intro: (RichTextLines) Rich text lines appended to the beginning of
the output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def run_ui(self,
init_command=None,
title=None,
title_color=None,
enable_mouse_on_start=True):
"""Run the UI until user- or command- triggered exit.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
enable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on
start-up.
Returns:
An exit token of arbitrary type. Can be None.
"""
raise NotImplementedError("run_ui() is not implemented in BaseUI")
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
output_file_path: (str or None) The path to save the screen output
to (if any).
"""
command = command.strip()
if not command:
return "", [], None
command_items = command_parser.parse_command(command)
command_items, output_file_path = command_parser.extract_output_file_path(
command_items)
return command_items[0], command_items[1:], output_file_path
def _analyze_tab_complete_input(self, text):
"""Analyze raw input to tab-completer.
Args:
text: (str) the full, raw input text to be tab-completed.
Returns:
context: (str) the context str. For example,
If text == "print_tensor softmax", returns "print_tensor".
If text == "print", returns "".
If text == "", returns "".
prefix: (str) the prefix to be tab-completed, from the last word.
For example, if text == "print_tensor softmax", returns "softmax".
If text == "print", returns "print".
If text == "", returns "".
except_last_word: (str) the input text, except the last word.
For example, if text == "print_tensor softmax", returns "print_tensor".
If text == "print_tensor -a softmax", returns "print_tensor -a".
If text == "print", returns "".
If text == "", returns "".
"""
text = text.lstrip()
if not text:
# Empty (top-level) context.
context = ""
prefix = ""
except_last_word = ""
else:
items = text.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
except_last_word = ""
else:
# Multiple words.
context = items[0]
prefix = items[-1]
except_last_word = " ".join(items[:-1]) + " "
return context, prefix, except_last_word
@property
def config(self):
"""Obtain the CLIConfig of this `BaseUI` instance."""
return self._config
def _config_command_handler(self, args, screen_info=None):
"""Command handler for the "config" command."""
del screen_info # Currently unused.
parsed = self._config_argparser.parse_args(args)
if hasattr(parsed, "property_name") and hasattr(parsed, "property_value"):
# set.
self._config.set(parsed.property_name, parsed.property_value)
return self._config.summarize(highlight=parsed.property_name)
else:
# show.
return self._config.summarize()
| apache-2.0 |
blueshed/blueshed-py | src/blueshed/handlers/basic_auth.py | 1 | 1716 | '''
Created on 30 Jan 2014
@author: peterb
'''
import base64
import logging
class BasicAuthMixin(object):
"""
BasicAuthMixin
"""
def _request_auth(self, realm):
if self._headers_written: raise Exception('headers have already been written')
self.set_status(401)
self.set_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
self.finish()
return False
def get_authenticated_user(self, auth_func, realm):
"""Requests HTTP basic authentication credentials from the client, or
authenticates the user if credentials are provided."""
try:
auth = self.request.headers.get('Authorization')
if auth == None: return self._request_auth(realm)
if not auth.startswith('Basic '): return self._request_auth(realm)
auth_decoded = base64.decodestring(auth[6:])
username, password = auth_decoded.split(':', 1)
if auth_func(self, realm, username, password):
return True
else:
return self._request_auth(realm)
except Exception:
logging.exception('basic-auth')
return self._request_auth(realm)
def basic_auth(realm, auth_func):
"""A decorator that can be used on methods that you wish to protect with
HTTP basic"""
def basic_auth_decorator(func):
def func_replacement(self, *args, **kwargs):
if self.get_authenticated_user(auth_func, realm):
return func(self, *args, **kwargs)
return func_replacement
return basic_auth_decorator
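# --- Illustrative usage sketch (not part of the original module) ---
# The handler class and credential check below are hypothetical; any callable
# with the (handler, realm, username, password) signature can serve as auth_func.
#
#     def check_credentials(handler, realm, username, password):
#         return username == 'admin' and password == 'secret'
#
#     class ProtectedHandler(tornado.web.RequestHandler):
#         @basic_auth('restricted area', check_credentials)
#         def get(self):
#             self.write('hello, authenticated user')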
| mit |
stefanocasazza/FrameworkBenchmarks | frameworks/Python/web2py/app/standard/modules/controller.py | 24 | 1725 | # -*- coding: utf-8 -*-
from random import randint
from functools import partial
import json as jsonOut
from gluon import current
from database import Dal, RawDal, num_queries
def plaintext():
current.response.headers['Content-Type'] = 'text/plain'
return 'Hello, World!'
def json():
current.response.headers['Content-Type'] = 'application/json'
return jsonOut.dumps({'message': 'Hello, World!'})
def db():
current.response.headers['Content-Type']='application/json'
return jsonOut.dumps(Dal('World').get_world(randint(1, 10000)))
def queries():
current.response.headers['Content-Type']='application/json'
db = RawDal() if current.optimized else Dal('World')
get_world = db.get_world
r10k = partial(randint, 1, 10000)
worlds = [get_world(r10k()) for _ in
xrange(num_queries(current.request.vars.queries))]
return jsonOut.dumps(worlds)
def updates():
current.response.headers['Content-Type']='application/json'
db = RawDal() if current.optimized else Dal('World')
get_world = db.get_world
update_world = db.update_world
r10k = partial(randint, 1, 10000)
worlds = []
for wid in (r10k() for _ in xrange(num_queries(current.request.vars.queries))):
world = get_world(wid)
newNumber = r10k()
world['randomNumber'] = newNumber
worlds.append(world)
update_world(wid, newNumber)
return jsonOut.dumps(worlds)
def fortune():
new_message = {'id': 0, 'message': 'Additional fortune added at request time.'}
db = RawDal() if current.optimized else Dal('Fortune')
fortunes = db.get_fortunes(new_message=new_message)
return current.response.render('fortune.html', fortunes=fortunes)
| bsd-3-clause |
victor-rene/kivy-gamelib | stickman/bone.py | 1 | 2072 | import math
from kivy.graphics.context_instructions import PopMatrix, PushMatrix, Rotate
from kivy.properties import NumericProperty
from kivy.uix.image import Image
class Bone(Image):
angle = NumericProperty()
def __init__(self, **kw):
super(Bone, self).__init__(**kw)
self.name = kw['name'] if 'name' in kw else None
self.allow_stretch = True
self.keep_ratio = False
self.source = 'img/bone.png'
self.next = []
self.prev = None
self.head = None
self.tip = None
self.bone_length = 0
self.radius = None
with self.canvas.before:
PushMatrix()
self.rotation = Rotate()
with self.canvas.after:
PopMatrix()
self.bind(pos=self.update, size=self.update, angle=self.rotate)
def attach(self, bone):
bone.prev = self
self.next.append(bone)
def attach_all(self, bones):
for bone in bones:
self.attach(bone)
def rotate(self, *args):
if self.prev:
self.rotation.angle = self.prev.rotation.angle + self.angle
else: self.rotation.angle = self.angle
self.tip = self.get_tip_pos()
for bone in self.next:
self.coerce(bone)
def update(self, *args):
self.radius = self.width / 2
self.bone_length = self.height - self.radius * 2 # approximate for head / tip radii
self.head = self.x + self.radius, self.top - self.radius
self.tip = self.get_tip_pos()
# print 'head', self.head, self.prev, self.pos
self.rotation.origin = self.head
for bone in self.next:
self.coerce(bone)
def get_tip_pos(self):
a = (self.rotation.angle - 90) * math.pi / 180
dx = math.cos(a) * self.bone_length
dy = math.sin(a) * self.bone_length
return self.x + self.radius + dx, self.top - self.radius + dy
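    # Worked example (illustrative): with the default angle of 0, a is -pi/2, so
    # dx == 0 and dy == -bone_length -- the tip hangs straight down from the head.
    # With rotation.angle == 90 and bone_length == 100, a == 0, giving dx == 100
    # and dy == 0, i.e. the tip sits 100 px to the right of the head.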
def set_head_pos(self, pos):
radius = self.width / 2
head_x, head_y = pos
self.pos = head_x - radius, head_y - radius - self.bone_length
def coerce(self, bone):
# print 'tip', self.get_tip_pos(), self.prev, self.pos
bone.set_head_pos(self.tip)
bone.rotate() | mit |
mykytamorachov/outpost | flask/lib/python2.7/site-packages/werkzeug/testsuite/formparser.py | 97 | 18740 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.formparser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the form parsing facilities.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from os.path import join, dirname
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import formparser
from werkzeug.test import create_environ, Client
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import RequestEntityTooLarge
from werkzeug.datastructures import MultiDict
from werkzeug.formparser import parse_form_data
from werkzeug._compat import BytesIO
@Request.application
def form_data_consumer(request):
result_object = request.args['object']
if result_object == 'text':
return Response(repr(request.form['text']))
f = request.files[result_object]
return Response(b'\n'.join((
repr(f.filename).encode('ascii'),
repr(f.name).encode('ascii'),
repr(f.content_type).encode('ascii'),
f.stream.read()
)))
def get_contents(filename):
with open(filename, 'rb') as f:
return f.read()
class FormParserTestCase(WerkzeugTestCase):
def test_limiting(self):
data = b'foo=Hello+World&bar=baz'
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_content_length = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='application/x-www-form-urlencoded',
method='POST')
req.max_form_memory_size = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
data = (b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\n'
b'Hello World\r\n'
b'--foo\r\nContent-Disposition: form-field; name=bar\r\n\r\n'
b'bar=baz\r\n--foo--')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 4
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_content_length = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 7
self.assert_raises(RequestEntityTooLarge, lambda: req.form['foo'])
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
req.max_form_memory_size = 400
self.assert_strict_equal(req.form['foo'], u'Hello World')
def test_parse_form_data_put_without_content(self):
# A PUT without a Content-Type header returns empty data
# Both rfc1945 and rfc2616 (1.0 and 1.1) say "Any HTTP/[1.0/1.1] message
# containing an entity-body SHOULD include a Content-Type header field
# defining the media type of that body." In the case where either
# headers are omitted, parse_form_data should still work.
env = create_environ('/foo', 'http://example.org/', method='PUT')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_strict_equal(stream.read(), b'')
self.assert_strict_equal(len(form), 0)
self.assert_strict_equal(len(files), 0)
def test_parse_form_data_get_without_content(self):
env = create_environ('/foo', 'http://example.org/', method='GET')
del env['CONTENT_TYPE']
del env['CONTENT_LENGTH']
stream, form, files = formparser.parse_form_data(env)
self.assert_strict_equal(stream.read(), b'')
self.assert_strict_equal(len(form), 0)
self.assert_strict_equal(len(files), 0)
def test_large_file(self):
data = b'x' * (1024 * 600)
req = Request.from_values(data={'foo': (BytesIO(data), 'test.txt')},
method='POST')
# make sure we have a real file here, because we expect to be
# on the disk. > 1024 * 500
self.assert_true(hasattr(req.files['foo'].stream, u'fileno'))
# close file to prevent fds from leaking
req.files['foo'].close()
def test_streaming_parse(self):
data = b'x' * (1024 * 600)
class StreamMPP(formparser.MultiPartParser):
def parse(self, file, boundary, content_length):
i = iter(self.parse_lines(file, boundary, content_length))
one = next(i)
two = next(i)
return self.cls(()), {'one': one, 'two': two}
class StreamFDP(formparser.FormDataParser):
def _sf_parse_multipart(self, stream, mimetype,
content_length, options):
form, files = StreamMPP(
self.stream_factory, self.charset, self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls).parse(stream, options.get('boundary').encode('ascii'),
content_length)
return stream, form, files
parse_functions = {}
parse_functions.update(formparser.FormDataParser.parse_functions)
parse_functions['multipart/form-data'] = _sf_parse_multipart
class StreamReq(Request):
form_data_parser_class = StreamFDP
req = StreamReq.from_values(data={'foo': (BytesIO(data), 'test.txt')},
method='POST')
self.assert_strict_equal('begin_file', req.files['one'][0])
self.assert_strict_equal(('foo', 'test.txt'), req.files['one'][1][1:])
self.assert_strict_equal('cont', req.files['two'][0])
self.assert_strict_equal(data, req.files['two'][1])
class MultiPartTestCase(WerkzeugTestCase):
def test_basic(self):
resources = join(dirname(__file__), 'multipart')
client = Client(form_data_consumer, Response)
repository = [
('firefox3-2png1txt', '---------------------------186454651713519341951581030105', [
(u'anchor.png', 'file1', 'image/png', 'file1.png'),
(u'application_edit.png', 'file2', 'image/png', 'file2.png')
], u'example text'),
('firefox3-2pnglongtext', '---------------------------14904044739787191031754711748', [
(u'accept.png', 'file1', 'image/png', 'file1.png'),
(u'add.png', 'file2', 'image/png', 'file2.png')
], u'--long text\r\n--with boundary\r\n--lookalikes--'),
('opera8-2png1txt', '----------zEO9jQKmLc2Cq88c23Dx19', [
(u'arrow_branch.png', 'file1', 'image/png', 'file1.png'),
(u'award_star_bronze_1.png', 'file2', 'image/png', 'file2.png')
], u'blafasel öäü'),
('webkit3-2png1txt', '----WebKitFormBoundaryjdSFhcARk8fyGNy6', [
(u'gtk-apply.png', 'file1', 'image/png', 'file1.png'),
(u'gtk-no.png', 'file2', 'image/png', 'file2.png')
], u'this is another text with ümläüts'),
('ie6-2png1txt', '---------------------------7d91b03a20128', [
(u'file1.png', 'file1', 'image/x-png', 'file1.png'),
(u'file2.png', 'file2', 'image/x-png', 'file2.png')
], u'ie6 sucks :-/')
]
for name, boundary, files, text in repository:
folder = join(resources, name)
data = get_contents(join(folder, 'request.txt'))
for filename, field, content_type, fsname in files:
response = client.post('/?object=' + field, data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
lines = response.get_data().split(b'\n', 3)
self.assert_strict_equal(lines[0], repr(filename).encode('ascii'))
self.assert_strict_equal(lines[1], repr(field).encode('ascii'))
self.assert_strict_equal(lines[2], repr(content_type).encode('ascii'))
self.assert_strict_equal(lines[3], get_contents(join(folder, fsname)))
response = client.post('/?object=text', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary,
content_length=len(data))
self.assert_strict_equal(response.get_data(), repr(text).encode('utf-8'))
def test_ie7_unc_path(self):
client = Client(form_data_consumer, Response)
data_file = join(dirname(__file__), 'multipart', 'ie7_full_path_request.txt')
data = get_contents(data_file)
boundary = '---------------------------7da36d1b4a0164'
response = client.post('/?object=cb_file_upload_multiple', data=data, content_type=
'multipart/form-data; boundary="%s"' % boundary, content_length=len(data))
lines = response.get_data().split(b'\n', 3)
self.assert_strict_equal(lines[0],
repr(u'Sellersburg Town Council Meeting 02-22-2010doc.doc').encode('ascii'))
def test_end_of_file(self):
        # This test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
b'Content-Type: text/plain\r\n\r\n'
b'file contents and no end'
)
data = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_true(not data.files)
self.assert_true(not data.form)
def test_broken(self):
data = (
'--foo\r\n'
'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n'
'Content-Transfer-Encoding: base64\r\n'
'Content-Type: text/plain\r\n\r\n'
'broken base 64'
'--foo--'
)
_, form, files = formparser.parse_form_data(create_environ(data=data,
method='POST', content_type='multipart/form-data; boundary=foo'))
self.assert_true(not files)
self.assert_true(not form)
self.assert_raises(ValueError, formparser.parse_form_data,
create_environ(data=data, method='POST',
content_type='multipart/form-data; boundary=foo'),
silent=False)
def test_file_no_content_type(self):
data = (
b'--foo\r\n'
b'Content-Disposition: form-data; name="test"; filename="test.txt"\r\n\r\n'
b'file contents\r\n--foo--'
)
data = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_equal(data.files['test'].filename, 'test.txt')
self.assert_strict_equal(data.files['test'].read(), b'file contents')
def test_extra_newline(self):
        # this test looks innocent but it was actually timing out in
# the Werkzeug 0.5 release version (#394)
data = (
b'\r\n\r\n--foo\r\n'
b'Content-Disposition: form-data; name="foo"\r\n\r\n'
b'a string\r\n'
b'--foo--'
)
data = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_true(not data.files)
self.assert_strict_equal(data.form['foo'], u'a string')
def test_headers(self):
data = (b'--foo\r\n'
b'Content-Disposition: form-data; name="foo"; filename="foo.txt"\r\n'
b'X-Custom-Header: blah\r\n'
b'Content-Type: text/plain; charset=utf-8\r\n\r\n'
b'file contents, just the contents\r\n'
b'--foo--')
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
foo = req.files['foo']
self.assert_strict_equal(foo.mimetype, 'text/plain')
self.assert_strict_equal(foo.mimetype_params, {'charset': 'utf-8'})
self.assert_strict_equal(foo.headers['content-type'], foo.content_type)
self.assert_strict_equal(foo.content_type, 'text/plain; charset=utf-8')
self.assert_strict_equal(foo.headers['x-custom-header'], 'blah')
def test_nonstandard_line_endings(self):
for nl in b'\n', b'\r', b'\r\n':
data = nl.join((
b'--foo',
b'Content-Disposition: form-data; name=foo',
b'',
b'this is just bar',
b'--foo',
b'Content-Disposition: form-data; name=bar',
b'',
b'blafasel',
b'--foo--'
))
req = Request.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
self.assert_strict_equal(req.form['foo'], u'this is just bar')
self.assert_strict_equal(req.form['bar'], u'blafasel')
def test_failures(self):
def parse_multipart(stream, boundary, content_length):
parser = formparser.MultiPartParser(content_length)
return parser.parse(stream, boundary, content_length)
self.assert_raises(ValueError, parse_multipart, BytesIO(), b'broken ', 0)
data = b'--foo\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))
data = b'--foo\r\nContent-Disposition: form-field; name=foo\r\n' \
b'Content-Transfer-Encoding: base64\r\n\r\nHello World\r\n--foo--'
self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))
data = b'--foo\r\nContent-Disposition: form-field; name=foo\r\n\r\nHello World\r\n'
self.assert_raises(ValueError, parse_multipart, BytesIO(data), b'foo', len(data))
x = formparser.parse_multipart_headers(['foo: bar\r\n', ' x test\r\n'])
self.assert_strict_equal(x['foo'], 'bar\n x test')
self.assert_raises(ValueError, formparser.parse_multipart_headers,
['foo: bar\r\n', ' x test'])
def test_bad_newline_bad_newline_assumption(self):
class ISORequest(Request):
charset = 'latin1'
contents = b'U2vlbmUgbORu'
data = b'--foo\r\nContent-Disposition: form-data; name="test"\r\n' \
b'Content-Transfer-Encoding: base64\r\n\r\n' + \
contents + b'\r\n--foo--'
req = ISORequest.from_values(input_stream=BytesIO(data),
content_length=len(data),
content_type='multipart/form-data; boundary=foo',
method='POST')
self.assert_strict_equal(req.form['test'], u'Sk\xe5ne l\xe4n')
def test_empty_multipart(self):
environ = {}
data = b'--boundary--'
environ['REQUEST_METHOD'] = 'POST'
environ['CONTENT_TYPE'] = 'multipart/form-data; boundary=boundary'
environ['CONTENT_LENGTH'] = str(len(data))
environ['wsgi.input'] = BytesIO(data)
stream, form, files = parse_form_data(environ, silent=False)
rv = stream.read()
self.assert_equal(rv, b'')
self.assert_equal(form, MultiDict())
self.assert_equal(files, MultiDict())
class InternalFunctionsTestCase(WerkzeugTestCase):
def test_line_parser(self):
assert formparser._line_parse('foo') == ('foo', False)
assert formparser._line_parse('foo\r\n') == ('foo', True)
assert formparser._line_parse('foo\r') == ('foo', True)
assert formparser._line_parse('foo\n') == ('foo', True)
def test_find_terminator(self):
lineiter = iter(b'\n\n\nfoo\nbar\nbaz'.splitlines(True))
find_terminator = formparser.MultiPartParser()._find_terminator
line = find_terminator(lineiter)
self.assert_equal(line, b'foo')
self.assert_equal(list(lineiter), [b'bar\n', b'baz'])
self.assert_equal(find_terminator([]), b'')
self.assert_equal(find_terminator([b'']), b'')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FormParserTestCase))
suite.addTest(unittest.makeSuite(MultiPartTestCase))
suite.addTest(unittest.makeSuite(InternalFunctionsTestCase))
return suite
| gpl-2.0 |
joshzarrabi/e-mission-server | emission/storage/decorations/useful_queries.py | 2 | 3911 | # Standard imports
from datetime import datetime, timedelta
import logging
# Our imports
from emission.core.get_database import get_section_db
import emission.core.wrapper.trip_old as rt
def get_all_sections(section_id):
""" Return all sections in the trip that the specified section is a part of
For example, if this is the section to go to the train station, return all
sections for the same trip.
The input is the _id field of the section
"""
section = rt.Section.section_from_json(get_section_db().find_one({'_id': section_id}))
allSections = get_section_db().find({"trip_id": section.trip_id})
return list(allSections)
def get_all_sections_for_user_day(user,year,month,day):
""" Return all sections in the trip that the specified section is a part of
For example, if this is the section to go to the train station, return all
sections for the same trip.
The input is the _id field of the section
"""
dayMidnight = datetime(year,month,day,0,0,0)
nextDayMidnight = dayMidnight + timedelta(days =1)
sectionIt = get_section_db().find({'user_id': user,
"section_start_datetime": {"$gt": dayMidnight},
"section_end_datetime": {"$lt": nextDayMidnight}})
return [rt.Section.section_from_json(s) for s in sectionIt]
def get_trip_before(section_id):
""" Return the trip just before the one that this section belongs to.
"""
section = rt.Section.section_from_json(get_section_db().find_one({'_id': section_id}))
logging.debug("Found section %s" % section)
firstSection = rt.Section.section_from_json(get_section_db().find_one({"trip_id": section.trip_id, "section_id": 0}))
logging.debug("First section %s" % firstSection)
    # First, try to find the section assuming that data collection was continuous
prevPlace = rt.Section.section_from_json(get_section_db().find_one({"section_end_datetime": firstSection.start_time}))
logging.debug("prevPlace %s" % prevPlace)
# This should be the "place" trip
if prevPlace is not None:
logging.debug("prevPlace.section_type = %s" % prevPlace.section_type)
if prevPlace.section_type != "place":
return None
else:
prevTrip = get_section_db().find_one({"section_end_datetime": prevPlace.start_time})
return prevTrip
else:
assert(False)
def get_bounds(sectionList):
    # Lat is going to be in the range of -90 to 90, and lng in the range of -180 to 180.
# So let's pick large positive and negative numbers to initialize them
min_lat = 999999
min_lon = 999999
max_lat = -9999999
max_lon = -9999999
for sectionJSON in sectionList:
section = rt.Section.section_from_json(sectionJSON)
logging.debug("Testing start point %s " % section.section_start_location)
if section.section_start_location.lat < min_lat:
min_lat = section.section_start_location.lat
if section.section_start_location.lon < min_lon:
min_lon = section.section_start_location.lon
logging.debug("Testing end point %s " % section.section_end_location)
if section.section_end_location.lat > max_lat:
max_lat = section.section_end_location.lat
if section.section_end_location.lon > max_lon:
max_lon = section.section_end_location.lon
return (rt.Coordinate(min_lat, min_lon), rt.Coordinate(max_lat, max_lon))
def get_center_for_section(sectionJSON):
"""
Returns a tuple (lat, lon) that can be passsed in to pygmaps to create a map
centered at the correct location
"""
return ((sectionJSON["section_start_point"]["coordinates"][1] +
sectionJSON["section_end_point"]["coordinates"][1])/2,
(sectionJSON["section_start_point"]["coordinates"][0] +
sectionJSON["section_end_point"]["coordinates"][0])/2)
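# Illustrative sketch (not part of the original module): combining the helpers
# above to frame a map around one trip. section_id is a placeholder for the _id
# of any section in that trip.
#
#   sections_json = get_all_sections(section_id)
#   sw_corner, ne_corner = get_bounds(sections_json)
#   center_lat, center_lon = get_center_for_section(sections_json[0])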
| bsd-3-clause |
pombredanne/MOG | nova/tests/virt/test_virt.py | 5 | 5363 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import test
from nova import utils
from nova.virt.disk import api as disk_api
from nova.virt import driver
class TestVirtDriver(test.NoDBTestCase):
def test_block_device(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
empty_block_device_info = {}
self.assertEqual(
driver.block_device_info_get_root(block_device_info), '/dev/sda')
self.assertEqual(
driver.block_device_info_get_root(empty_block_device_info), None)
self.assertEqual(
driver.block_device_info_get_root(None), None)
self.assertEqual(
driver.block_device_info_get_swap(block_device_info), swap)
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['device_name'], None)
self.assertEqual(driver.block_device_info_get_swap(
empty_block_device_info)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['device_name'],
None)
self.assertEqual(
driver.block_device_info_get_swap({'swap': None})['swap_size'],
0)
self.assertEqual(
driver.block_device_info_get_swap(None)['device_name'], None)
self.assertEqual(
driver.block_device_info_get_swap(None)['swap_size'], 0)
self.assertEqual(
driver.block_device_info_get_ephemerals(block_device_info),
ephemerals)
self.assertEqual(
driver.block_device_info_get_ephemerals(empty_block_device_info),
[])
self.assertEqual(
driver.block_device_info_get_ephemerals(None),
[])
def test_swap_is_usable(self):
self.assertFalse(driver.swap_is_usable(None))
self.assertFalse(driver.swap_is_usable({'device_name': None}))
self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 0}))
self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb',
'swap_size': 1}))
class TestVirtDisk(test.NoDBTestCase):
def setUp(self):
super(TestVirtDisk, self).setUp()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def test_lxc_teardown_container(self):
def proc_mounts(self, mount_point):
mount_points = {
'/mnt/loop/nopart': '/dev/loop0',
'/mnt/loop/part': '/dev/mapper/loop0p1',
'/mnt/nbd/nopart': '/dev/nbd15',
'/mnt/nbd/part': '/dev/mapper/nbd15p1',
}
return mount_points[mount_point]
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
disk_api.teardown_container('/mnt/loop/nopart')
expected_commands += [
('umount', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/loop/part')
expected_commands += [
('umount', '/dev/mapper/loop0p1'),
('kpartx', '-d', '/dev/loop0'),
('losetup', '--detach', '/dev/loop0'),
]
disk_api.teardown_container('/mnt/nbd/nopart')
expected_commands += [
('umount', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
disk_api.teardown_container('/mnt/nbd/part')
expected_commands += [
('umount', '/dev/mapper/nbd15p1'),
('kpartx', '-d', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
self.assertEqual(self.executes, expected_commands)
| apache-2.0 |
McHatters/HippieStation13 | bot/C_rtd.py | 67 | 3045 | import random
def rtd(data,debug,sender):
backo = data
try:
arg1,arg2 = backo.split("d")
except ValueError, err:
return("Too many or too small amount of arguments")
else:
if debug:
print sender+":!rtd "+arg1+"d"+arg2 #faster than using %s's
die,die2 = [],[]
current_mark = ""
outcome = 0
realnumberfound = False
checks = []
count = 0
arg1 = arg1.replace(" ","")
arg2 = arg2.replace(" ","")
try:
i_arg1 = int(arg1)
a_arg1 = abs(i_arg1)
if "+" in arg2 or "-" in arg2:
plus_spot = arg2.find("+")
minus_spot = arg2.find("-")
if plus_spot == -1 and minus_spot == -1:
nicer_form = ""
elif plus_spot != -1 and minus_spot == -1:
nicer_form = arg2[plus_spot:]
elif plus_spot == -1 and minus_spot != -1:
nicer_form = arg2[minus_spot:]
else:
if plus_spot < minus_spot:
nicer_form = arg2[plus_spot:]
else:
nicer_form = arg2[minus_spot:]
for letter in arg2:
if letter == "+" or letter == "-":
current_mark = letter
checks = []
count += 1
continue
checks.append(letter)
try:
next_up = arg2[count+1]
except:
if realnumberfound == False:
i_arg2 = int("".join(checks))
checks = []
realnumberfound = True
elif current_mark == "+":
outcome += int("".join(checks))
else:
outcome -= int("".join(checks))
else:
if next_up == "+" or next_up == "-":
if realnumberfound == False:
i_arg2 = int("".join(checks))
checks = []
realnumberfound = True
else:
if current_mark == "+":
outcome += int("".join(checks))
else:
outcome -= int("".join(checks))
checks = []
count += 1
else:
i_arg2 = int(arg2)
if a_arg1 == 0 or abs(i_arg2) == 0:
raise RuntimeError
except ValueError:
return("You lied! That's not a number!")
except RuntimeError:
return("Too many zeroes!")
else:
if a_arg1 > 100:
return("Too many rolls, I can only do one hundred at max.")
else:
for i in xrange(0,a_arg1):
if i_arg2 < 0:
dice = random.randint(i_arg2,0)
else:
dice = random.randint(1,i_arg2)
die.append(dice)
die2.append(str(dice))
if i_arg2 < 0:
flist = "".join(die2)
else:
flist = "+".join(die2)
if len(flist) > 350:
return(str(reduce(lambda x,y: x+y, die)+outcome))
else:
if current_mark == "":
return(flist+" = "+str(reduce(lambda x,y: x+y, die)+outcome))
else:
return(flist+" ("+nicer_form+") = "+str(reduce(lambda x,y: x+y, die)+outcome))
| agpl-3.0 |
kostajaitachi/shogun | tests/integration/python_modular/test_one.py | 21 | 2671 | #!/usr/bin/env python
"""
Test one data file
"""
from numpy import *
import sys
import kernel
import distance
import classifier
import clustering
import distribution
import regression
import preprocessor
from modshogun import Math_init_random
SUPPORTED=['kernel', 'distance', 'classifier', 'clustering', 'distribution',
'regression', 'preprocessor']
def _get_name_fun (fnam):
module=None
for supported in SUPPORTED:
if fnam.find(supported)>-1:
module=supported
break
if module is None:
print('Module required for %s not supported yet!' % fnam)
return None
return module+'.test'
def _test_mfile (fnam):
try:
mfile=open(fnam, mode='r')
except IOError as e:
print(e)
return False
indata={}
name_fun=_get_name_fun(fnam)
if name_fun is None:
return False
for line in mfile:
line=line.strip(" \t\n;")
param = line.split('=')[0].strip()
if param=='name':
name=line.split('=')[1].strip().split("'")[1]
indata[param]=name
elif param=='kernel_symdata' or param=='kernel_data':
indata[param]=_read_matrix(line)
elif param.startswith('kernel_matrix') or \
param.startswith('distance_matrix'):
indata[param]=_read_matrix(line)
elif param.find('data_train')>-1 or param.find('data_test')>-1:
# data_{train,test} might be prepended by 'subkernelX_'
indata[param]=_read_matrix(line)
elif param=='classifier_alphas' or param=='classifier_support_vectors':
try:
indata[param]=eval(line.split('=')[1])
except SyntaxError: # might be MultiClass SVM and hence matrix
indata[param]=_read_matrix(line)
elif param=='clustering_centers' or param=='clustering_pairs':
indata[param]=_read_matrix(line)
else:
if (line.find("'")==-1):
indata[param]=eval(line.split('=')[1])
else:
indata[param]=line.split('=')[1].strip().split("'")[1]
mfile.close()
fun=eval(name_fun)
# seed random to constant value used at data file's creation
Math_init_random(indata['init_random'])
random.seed(indata['init_random'])
return fun(indata)
def _read_matrix (line):
try:
str_line=(line.split('[')[1]).split(']')[0]
except IndexError:
str_line=(line.split('{')[1]).split('}')[0]
lines=str_line.split(';')
lis2d=list()
for x in lines:
lis=list()
for y in x.split(','):
y=y.replace("'","").strip()
if(y.isalpha()):
lis.append(y)
else:
if y.find('.')!=-1:
lis.append(float(y))
else:
try:
lis.append(int(y))
except ValueError: # not int, RAWDNA?
lis.append(y)
lis2d.append(lis)
return array(lis2d)
for filename in sys.argv:
if (filename.endswith('.m')):
res=_test_mfile(filename)
if res:
sys.exit(0)
else:
sys.exit(1)
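# Example invocation (illustrative; the .m files are generated test data and the
# name below is hypothetical):
#   python test_one.py kernel_gaussian.m
# The script exits with status 0 if the first .m file passes and 1 if it fails.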
| gpl-3.0 |
Cyrillic327/p2pool | p2pool/test/test_node.py | 198 | 10503 | from __future__ import division
import base64
import random
import tempfile
from twisted.internet import defer, reactor
from twisted.python import failure
from twisted.trial import unittest
from twisted.web import client, resource, server
from p2pool import data, node, work
from p2pool.bitcoin import data as bitcoin_data, networks, worker_interface
from p2pool.util import deferral, jsonrpc, math, variable
class bitcoind(object): # can be used as p2p factory, p2p protocol, or rpc jsonrpc proxy
def __init__(self):
self.blocks = [0x000000000000016c169477c25421250ec5d32cf9c6d38538b5de970a2355fd89]
self.headers = {0x16c169477c25421250ec5d32cf9c6d38538b5de970a2355fd89: {
'nonce': 1853158954,
'timestamp': 1351658517,
'merkle_root': 2282849479936278423916707524932131168473430114569971665822757638339486597658L,
'version': 1,
'previous_block': 1048610514577342396345362905164852351970507722694242579238530L,
'bits': bitcoin_data.FloatingInteger(bits=0x1a0513c5, target=0x513c50000000000000000000000000000000000000000000000L),
}}
self.conn = variable.Variable(self)
self.new_headers = variable.Event()
self.new_block = variable.Event()
self.new_tx = variable.Event()
# p2p factory
def getProtocol(self):
return self
# p2p protocol
def send_block(self, block):
pass
def send_tx(self, tx):
pass
def get_block_header(self, block_hash):
return self.headers[block_hash]
# rpc jsonrpc proxy
def rpc_help(self):
return '\ngetblock '
def rpc_getblock(self, block_hash_hex):
block_hash = int(block_hash_hex, 16)
return dict(height=self.blocks.index(block_hash))
def __getattr__(self, name):
if name.startswith('rpc_'):
return lambda *args, **kwargs: failure.Failure(jsonrpc.Error_for_code(-32601)('Method not found'))
def rpc_getblocktemplate(self, param):
if param['mode'] == 'template':
pass
elif param['mode'] == 'submit':
result = param['data']
block = bitcoin_data.block_type.unpack(result.decode('hex'))
if sum(tx_out['value'] for tx_out in block['txs'][0]['tx_outs']) != sum(tx['tx_outs'][0]['value'] for tx in block['txs'][1:]) + 5000000000:
print 'invalid fee'
if block['header']['previous_block'] != self.blocks[-1]:
return False
if bitcoin_data.hash256(result.decode('hex')) > block['header']['bits'].target:
return False
header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(block['header']))
self.blocks.append(header_hash)
self.headers[header_hash] = block['header']
reactor.callLater(0, self.new_block.happened)
return True
else:
raise jsonrpc.Error_for_code(-1)('invalid request')
txs = []
for i in xrange(100):
fee = i
txs.append(dict(
data=bitcoin_data.tx_type.pack(dict(version=1, tx_ins=[], tx_outs=[dict(value=fee, script='hello!'*100)], lock_time=0)).encode('hex'),
fee=fee,
))
return {
"version" : 2,
"previousblockhash" : '%064x' % (self.blocks[-1],),
"transactions" : txs,
"coinbaseaux" : {
"flags" : "062f503253482f"
},
"coinbasevalue" : 5000000000 + sum(tx['fee'] for tx in txs),
"target" : "0000000000000513c50000000000000000000000000000000000000000000000",
"mintime" : 1351655621,
"mutable" : [
"time",
"transactions",
"prevblock"
],
"noncerange" : "00000000ffffffff",
"sigoplimit" : 20000,
"sizelimit" : 1000000,
"curtime" : 1351659940,
"bits" : "21008000",
"height" : len(self.blocks),
}
@apply
class mm_provider(object):
def __getattr__(self, name):
print '>>>>>>>', name
def rpc_getauxblock(self, request, result1=None, result2=None):
if result1 is not None:
print result1, result2
return True
return {
"target" : "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", # 2**256*2/3
"hash" : "2756ea0315d46dc3d8d974f34380873fc88863845ac01a658ef11bc3b368af52",
"chainid" : 1
}
mynet = math.Object(
NAME='mynet',
PARENT=networks.nets['litecoin_testnet'],
SHARE_PERIOD=5, # seconds
CHAIN_LENGTH=20*60//3, # shares
REAL_CHAIN_LENGTH=20*60//3, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=3, # blocks
IDENTIFIER='cca5e24ec6408b1e'.decode('hex'),
PREFIX='ad9614f6466a39cf'.decode('hex'),
P2P_PORT=19338,
MIN_TARGET=2**256 - 1,
MAX_TARGET=2**256 - 1,
PERSIST=False,
WORKER_PORT=19327,
BOOTSTRAP_ADDRS='72.14.191.28'.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: True,
)
class MiniNode(object):
@classmethod
@defer.inlineCallbacks
def start(cls, net, factory, bitcoind, peer_ports, merged_urls):
self = cls()
self.n = node.Node(factory, bitcoind, [], [], net)
yield self.n.start()
self.n.p2p_node = node.P2PNode(self.n, port=0, max_incoming_conns=1000000, addr_store={}, connect_addrs=[('127.0.0.1', peer_port) for peer_port in peer_ports])
self.n.p2p_node.start()
wb = work.WorkerBridge(node=self.n, my_pubkey_hash=random.randrange(2**160), donation_percentage=random.uniform(0, 10), merged_urls=merged_urls, worker_fee=3)
self.wb = wb
web_root = resource.Resource()
worker_interface.WorkerInterface(wb).attach_to(web_root)
self.web_port = reactor.listenTCP(0, server.Site(web_root))
defer.returnValue(self)
@defer.inlineCallbacks
def stop(self):
yield self.web_port.stopListening()
yield self.n.p2p_node.stop()
yield self.n.stop()
del self.web_port, self.n
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_node(self):
bitd = bitcoind()
mm_root = resource.Resource()
mm_root.putChild('', jsonrpc.HTTPServer(mm_provider))
mm_port = reactor.listenTCP(0, server.Site(mm_root))
n = node.Node(bitd, bitd, [], [], mynet)
yield n.start()
wb = work.WorkerBridge(node=n, my_pubkey_hash=42, donation_percentage=2, merged_urls=[('http://127.0.0.1:%i' % (mm_port.getHost().port,), '')], worker_fee=3)
web_root = resource.Resource()
worker_interface.WorkerInterface(wb).attach_to(web_root)
port = reactor.listenTCP(0, server.Site(web_root))
proxy = jsonrpc.HTTPProxy('http://127.0.0.1:' + str(port.getHost().port),
headers=dict(Authorization='Basic ' + base64.b64encode('user/0:password')))
yield deferral.sleep(3)
for i in xrange(100):
blah = yield proxy.rpc_getwork()
yield proxy.rpc_getwork(blah['data'])
yield deferral.sleep(3)
assert len(n.tracker.items) == 100
assert n.tracker.verified.get_height(n.best_share_var.value) == 100
wb.stop()
n.stop()
yield port.stopListening()
del n, wb, web_root, port, proxy
import gc
gc.collect()
gc.collect()
gc.collect()
yield deferral.sleep(20) # waiting for work_poller to exit
yield mm_port.stopListening()
#test_node.timeout = 15
@defer.inlineCallbacks
def test_nodes(self):
N = 3
SHARES = 600
bitd = bitcoind()
nodes = []
for i in xrange(N):
nodes.append((yield MiniNode.start(mynet, bitd, bitd, [mn.n.p2p_node.serverfactory.listen_port.getHost().port for mn in nodes], [])))
yield deferral.sleep(3)
for i in xrange(SHARES):
proxy = jsonrpc.HTTPProxy('http://127.0.0.1:' + str(random.choice(nodes).web_port.getHost().port),
headers=dict(Authorization='Basic ' + base64.b64encode('user/0:password')))
blah = yield proxy.rpc_getwork()
yield proxy.rpc_getwork(blah['data'])
yield deferral.sleep(.05)
print i
print type(nodes[0].n.tracker.items[nodes[0].n.best_share_var.value])
# crawl web pages
from p2pool import web
stop_event = variable.Event()
web2_root = web.get_web_root(nodes[0].wb, tempfile.mkdtemp(), variable.Variable(None), stop_event)
web2_port = reactor.listenTCP(0, server.Site(web2_root))
for name in web2_root.listNames() + ['web/' + x for x in web2_root.getChildWithDefault('web', None).listNames()]:
if name in ['web/graph_data', 'web/share', 'web/share_data']: continue
print
print name
try:
res = yield client.getPage('http://127.0.0.1:%i/%s' % (web2_port.getHost().port, name))
except:
import traceback
traceback.print_exc()
else:
print repr(res)[:100]
print
yield web2_port.stopListening()
stop_event.happened()
del web2_root
yield deferral.sleep(3)
for i, n in enumerate(nodes):
assert len(n.n.tracker.items) == SHARES, (i, len(n.n.tracker.items))
assert n.n.tracker.verified.get_height(n.n.best_share_var.value) == SHARES, (i, n.n.tracker.verified.get_height(n.n.best_share_var.value))
assert type(n.n.tracker.items[nodes[0].n.best_share_var.value]) is (data.Share.SUCCESSOR if data.Share.SUCCESSOR is not None else data.Share)
assert type(n.n.tracker.items[n.n.tracker.get_nth_parent_hash(nodes[0].n.best_share_var.value, SHARES - 5)]) is data.Share
for n in nodes:
yield n.stop()
del nodes, n
import gc
gc.collect()
gc.collect()
gc.collect()
yield deferral.sleep(20) # waiting for work_poller to exit
test_nodes.timeout = 300
| gpl-3.0 |
liberatorqjw/scikit-learn | sklearn/tree/export.py | 30 | 4529 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Licence: BSD 3 clause
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
esthermm/odoomrp-utils | stock_picking_customer_ref/__openerp__.py | 12 | 1446 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2015 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2015 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Stock Picking Customer Ref",
'version': "1.0",
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
'website': "http://www.odoomrp.com",
'category': 'Warehouse Management',
"depends": ["sale",
"stock",
"sale_stock"
],
'data': ["views/stock_picking_view.xml"],
"installable": True,
}
| agpl-3.0 |
djmaze/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/patchstatus.py | 146 | 1974 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class PatchStatus(webapp.RequestHandler):
def get(self, queue_name, attachment_id):
statuses = QueueStatus.all().filter('queue_name =', queue_name).filter('active_patch_id =', int(attachment_id)).order('-date').fetch(1)
if not statuses:
self.error(404)
return
self.response.out.write(statuses[0].message)
| bsd-3-clause |
zsjohny/jumpserver | apps/assets/serializers/asset.py | 1 | 6406 | # -*- coding: utf-8 -*-
#
from rest_framework import serializers
from django.db.models import Prefetch, F
from django.utils.translation import ugettext_lazy as _
from orgs.mixins.serializers import BulkOrgResourceModelSerializer
from common.serializers import AdaptedBulkListSerializer
from ..models import Asset, Node, Label, Platform
from .base import ConnectivitySerializer
__all__ = [
'AssetSerializer', 'AssetSimpleSerializer',
'AssetDisplaySerializer',
'ProtocolsField', 'PlatformSerializer',
'AssetDetailSerializer', 'AssetTaskSerializer',
]
class ProtocolField(serializers.RegexField):
protocols = '|'.join(dict(Asset.PROTOCOL_CHOICES).keys())
default_error_messages = {
        'invalid': _('Protocol format should be {}/{}'.format(protocols, '1-65535'))
}
regex = r'^(%s)/(\d{1,5})$' % protocols
def __init__(self, *args, **kwargs):
super().__init__(self.regex, **kwargs)
def validate_duplicate_protocols(values):
errors = []
names = []
for value in values:
if not value or '/' not in value:
continue
name = value.split('/')[0]
if name in names:
errors.append(_("Protocol duplicate: {}").format(name))
names.append(name)
errors.append('')
if any(errors):
raise serializers.ValidationError(errors)
class ProtocolsField(serializers.ListField):
default_validators = [validate_duplicate_protocols]
def __init__(self, *args, **kwargs):
kwargs['child'] = ProtocolField()
kwargs['allow_null'] = True
kwargs['allow_empty'] = True
kwargs['min_length'] = 1
kwargs['max_length'] = 4
super().__init__(*args, **kwargs)
def to_representation(self, value):
if not value:
return []
return value.split(' ')
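# Illustrative behaviour of ProtocolsField (comment added for clarity; 'ssh' and 'rdp'
# are assumed to be keys of Asset.PROTOCOL_CHOICES):
#   input  ['ssh/22', 'rdp/3389']  -> accepted by ProtocolField's regex and the duplicate check
#   input  ['ssh/22', 'ssh/2222']  -> rejected with "Protocol duplicate: ssh"
#   output: the stored string 'ssh/22 rdp/3389' is split back into ['ssh/22', 'rdp/3389']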
class AssetSerializer(BulkOrgResourceModelSerializer):
platform = serializers.SlugRelatedField(
slug_field='name', queryset=Platform.objects.all(), label=_("Platform")
)
protocols = ProtocolsField(label=_('Protocols'), required=False)
"""
资产的数据结构
"""
class Meta:
model = Asset
list_serializer_class = AdaptedBulkListSerializer
fields = [
'id', 'ip', 'hostname', 'protocol', 'port',
'protocols', 'platform', 'is_active', 'public_ip', 'domain',
'admin_user', 'nodes', 'labels', 'number', 'vendor', 'model', 'sn',
'cpu_model', 'cpu_count', 'cpu_cores', 'cpu_vcpus', 'memory',
'disk_total', 'disk_info', 'os', 'os_version', 'os_arch',
'hostname_raw', 'comment', 'created_by', 'date_created',
'hardware_info',
]
read_only_fields = (
'vendor', 'model', 'sn', 'cpu_model', 'cpu_count',
'cpu_cores', 'cpu_vcpus', 'memory', 'disk_total', 'disk_info',
'os', 'os_version', 'os_arch', 'hostname_raw',
'created_by', 'date_created',
)
extra_kwargs = {
'protocol': {'write_only': True},
'port': {'write_only': True},
'hardware_info': {'label': _('Hardware info')},
'org_name': {'label': _('Org name')}
}
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset.prefetch_related(
Prefetch('nodes', queryset=Node.objects.all().only('id')),
Prefetch('labels', queryset=Label.objects.all().only('id')),
).select_related('admin_user', 'domain', 'platform') \
.annotate(platform_base=F('platform__base'))
return queryset
def compatible_with_old_protocol(self, validated_data):
protocols_data = validated_data.pop("protocols", [])
        # Compatibility with the old API
name = validated_data.get("protocol")
port = validated_data.get("port")
if not protocols_data and name and port:
protocols_data.insert(0, '/'.join([name, str(port)]))
elif not name and not port and protocols_data:
protocol = protocols_data[0].split('/')
validated_data["protocol"] = protocol[0]
validated_data["port"] = int(protocol[1])
if protocols_data:
validated_data["protocols"] = ' '.join(protocols_data)
def create(self, validated_data):
self.compatible_with_old_protocol(validated_data)
instance = super().create(validated_data)
return instance
def update(self, instance, validated_data):
self.compatible_with_old_protocol(validated_data)
return super().update(instance, validated_data)
class AssetDisplaySerializer(AssetSerializer):
connectivity = ConnectivitySerializer(read_only=True, label=_("Connectivity"))
class Meta(AssetSerializer.Meta):
fields = [
'id', 'ip', 'hostname', 'protocol', 'port',
'protocols', 'is_active', 'public_ip',
'number', 'vendor', 'model', 'sn',
'cpu_model', 'cpu_count', 'cpu_cores', 'cpu_vcpus', 'memory',
'disk_total', 'disk_info', 'os', 'os_version', 'os_arch',
'hostname_raw', 'comment', 'created_by', 'date_created',
'hardware_info', 'connectivity',
]
@classmethod
def setup_eager_loading(cls, queryset):
""" Perform necessary eager loading of data. """
queryset = queryset\
.annotate(admin_user_username=F('admin_user__username'))
return queryset
class PlatformSerializer(serializers.ModelSerializer):
meta = serializers.DictField(required=False, allow_null=True)
class Meta:
model = Platform
fields = [
'id', 'name', 'base', 'charset',
'internal', 'meta', 'comment'
]
class AssetDetailSerializer(AssetSerializer):
platform = PlatformSerializer(read_only=True)
class AssetSimpleSerializer(serializers.ModelSerializer):
connectivity = ConnectivitySerializer(read_only=True, label=_("Connectivity"))
class Meta:
model = Asset
fields = ['id', 'hostname', 'ip', 'connectivity', 'port']
class AssetTaskSerializer(serializers.Serializer):
ACTION_CHOICES = (
('refresh', 'refresh'),
('test', 'test'),
)
task = serializers.CharField(read_only=True)
action = serializers.ChoiceField(choices=ACTION_CHOICES, write_only=True)
| gpl-2.0 |
Lloir/pc-kernel | scripts/gdb/linux/dmesg.py | 367 | 2005 | #
# gdb helper commands and functions for Linux kernel debugging
#
# kernel log buffer dump
#
# Copyright (c) Siemens AG, 2011, 2012
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
import string
from linux import utils
class LxDmesg(gdb.Command):
"""Print Linux kernel log buffer."""
def __init__(self):
super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16)
log_first_idx = int(gdb.parse_and_eval("log_first_idx"))
log_next_idx = int(gdb.parse_and_eval("log_next_idx"))
log_buf_len = int(gdb.parse_and_eval("log_buf_len"))
inf = gdb.inferiors()[0]
start = log_buf_addr + log_first_idx
if log_first_idx < log_next_idx:
log_buf_2nd_half = -1
length = log_next_idx - log_first_idx
log_buf = inf.read_memory(start, length)
else:
log_buf_2nd_half = log_buf_len - log_first_idx
log_buf = inf.read_memory(start, log_buf_2nd_half) + \
inf.read_memory(log_buf_addr, log_next_idx)
pos = 0
while pos < log_buf.__len__():
length = utils.read_u16(log_buf[pos + 8:pos + 10])
if length == 0:
if log_buf_2nd_half == -1:
gdb.write("Corrupted log buffer!\n")
break
pos = log_buf_2nd_half
continue
text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
text = log_buf[pos + 16:pos + 16 + text_len]
time_stamp = utils.read_u64(log_buf[pos:pos + 8])
for line in memoryview(text).tobytes().splitlines():
gdb.write("[{time:12.6f}] {line}\n".format(
time=time_stamp / 1000000000.0,
line=line))
pos += length
LxDmesg()
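# Typical use (illustrative), from a gdb session that has loaded the kernel's
# scripts/gdb/vmlinux-gdb.py helpers against a running kernel or a dump:
#   (gdb) lx-dmesg
#   [    0.000000] Initializing cgroup subsys cpuset   <- example output line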
| gpl-2.0 |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/etc/apps/headphones/lib/unidecode/x0b9.py | 253 | 4704 | data = (
'ruk', # 0x00
'rut', # 0x01
'rup', # 0x02
'ruh', # 0x03
'rweo', # 0x04
'rweog', # 0x05
'rweogg', # 0x06
'rweogs', # 0x07
'rweon', # 0x08
'rweonj', # 0x09
'rweonh', # 0x0a
'rweod', # 0x0b
'rweol', # 0x0c
'rweolg', # 0x0d
'rweolm', # 0x0e
'rweolb', # 0x0f
'rweols', # 0x10
'rweolt', # 0x11
'rweolp', # 0x12
'rweolh', # 0x13
'rweom', # 0x14
'rweob', # 0x15
'rweobs', # 0x16
'rweos', # 0x17
'rweoss', # 0x18
'rweong', # 0x19
'rweoj', # 0x1a
'rweoc', # 0x1b
'rweok', # 0x1c
'rweot', # 0x1d
'rweop', # 0x1e
'rweoh', # 0x1f
'rwe', # 0x20
'rweg', # 0x21
'rwegg', # 0x22
'rwegs', # 0x23
'rwen', # 0x24
'rwenj', # 0x25
'rwenh', # 0x26
'rwed', # 0x27
'rwel', # 0x28
'rwelg', # 0x29
'rwelm', # 0x2a
'rwelb', # 0x2b
'rwels', # 0x2c
'rwelt', # 0x2d
'rwelp', # 0x2e
'rwelh', # 0x2f
'rwem', # 0x30
'rweb', # 0x31
'rwebs', # 0x32
'rwes', # 0x33
'rwess', # 0x34
'rweng', # 0x35
'rwej', # 0x36
'rwec', # 0x37
'rwek', # 0x38
'rwet', # 0x39
'rwep', # 0x3a
'rweh', # 0x3b
'rwi', # 0x3c
'rwig', # 0x3d
'rwigg', # 0x3e
'rwigs', # 0x3f
'rwin', # 0x40
'rwinj', # 0x41
'rwinh', # 0x42
'rwid', # 0x43
'rwil', # 0x44
'rwilg', # 0x45
'rwilm', # 0x46
'rwilb', # 0x47
'rwils', # 0x48
'rwilt', # 0x49
'rwilp', # 0x4a
'rwilh', # 0x4b
'rwim', # 0x4c
'rwib', # 0x4d
'rwibs', # 0x4e
'rwis', # 0x4f
'rwiss', # 0x50
'rwing', # 0x51
'rwij', # 0x52
'rwic', # 0x53
'rwik', # 0x54
'rwit', # 0x55
'rwip', # 0x56
'rwih', # 0x57
'ryu', # 0x58
'ryug', # 0x59
'ryugg', # 0x5a
'ryugs', # 0x5b
'ryun', # 0x5c
'ryunj', # 0x5d
'ryunh', # 0x5e
'ryud', # 0x5f
'ryul', # 0x60
'ryulg', # 0x61
'ryulm', # 0x62
'ryulb', # 0x63
'ryuls', # 0x64
'ryult', # 0x65
'ryulp', # 0x66
'ryulh', # 0x67
'ryum', # 0x68
'ryub', # 0x69
'ryubs', # 0x6a
'ryus', # 0x6b
'ryuss', # 0x6c
'ryung', # 0x6d
'ryuj', # 0x6e
'ryuc', # 0x6f
'ryuk', # 0x70
'ryut', # 0x71
'ryup', # 0x72
'ryuh', # 0x73
'reu', # 0x74
'reug', # 0x75
'reugg', # 0x76
'reugs', # 0x77
'reun', # 0x78
'reunj', # 0x79
'reunh', # 0x7a
'reud', # 0x7b
'reul', # 0x7c
'reulg', # 0x7d
'reulm', # 0x7e
'reulb', # 0x7f
'reuls', # 0x80
'reult', # 0x81
'reulp', # 0x82
'reulh', # 0x83
'reum', # 0x84
'reub', # 0x85
'reubs', # 0x86
'reus', # 0x87
'reuss', # 0x88
'reung', # 0x89
'reuj', # 0x8a
'reuc', # 0x8b
'reuk', # 0x8c
'reut', # 0x8d
'reup', # 0x8e
'reuh', # 0x8f
'ryi', # 0x90
'ryig', # 0x91
'ryigg', # 0x92
'ryigs', # 0x93
'ryin', # 0x94
'ryinj', # 0x95
'ryinh', # 0x96
'ryid', # 0x97
'ryil', # 0x98
'ryilg', # 0x99
'ryilm', # 0x9a
'ryilb', # 0x9b
'ryils', # 0x9c
'ryilt', # 0x9d
'ryilp', # 0x9e
'ryilh', # 0x9f
'ryim', # 0xa0
'ryib', # 0xa1
'ryibs', # 0xa2
'ryis', # 0xa3
'ryiss', # 0xa4
'rying', # 0xa5
'ryij', # 0xa6
'ryic', # 0xa7
'ryik', # 0xa8
'ryit', # 0xa9
'ryip', # 0xaa
'ryih', # 0xab
'ri', # 0xac
'rig', # 0xad
'rigg', # 0xae
'rigs', # 0xaf
'rin', # 0xb0
'rinj', # 0xb1
'rinh', # 0xb2
'rid', # 0xb3
'ril', # 0xb4
'rilg', # 0xb5
'rilm', # 0xb6
'rilb', # 0xb7
'rils', # 0xb8
'rilt', # 0xb9
'rilp', # 0xba
'rilh', # 0xbb
'rim', # 0xbc
'rib', # 0xbd
'ribs', # 0xbe
'ris', # 0xbf
'riss', # 0xc0
'ring', # 0xc1
'rij', # 0xc2
'ric', # 0xc3
'rik', # 0xc4
'rit', # 0xc5
'rip', # 0xc6
'rih', # 0xc7
'ma', # 0xc8
'mag', # 0xc9
'magg', # 0xca
'mags', # 0xcb
'man', # 0xcc
'manj', # 0xcd
'manh', # 0xce
'mad', # 0xcf
'mal', # 0xd0
'malg', # 0xd1
'malm', # 0xd2
'malb', # 0xd3
'mals', # 0xd4
'malt', # 0xd5
'malp', # 0xd6
'malh', # 0xd7
'mam', # 0xd8
'mab', # 0xd9
'mabs', # 0xda
'mas', # 0xdb
'mass', # 0xdc
'mang', # 0xdd
'maj', # 0xde
'mac', # 0xdf
'mak', # 0xe0
'mat', # 0xe1
'map', # 0xe2
'mah', # 0xe3
'mae', # 0xe4
'maeg', # 0xe5
'maegg', # 0xe6
'maegs', # 0xe7
'maen', # 0xe8
'maenj', # 0xe9
'maenh', # 0xea
'maed', # 0xeb
'mael', # 0xec
'maelg', # 0xed
'maelm', # 0xee
'maelb', # 0xef
'maels', # 0xf0
'maelt', # 0xf1
'maelp', # 0xf2
'maelh', # 0xf3
'maem', # 0xf4
'maeb', # 0xf5
'maebs', # 0xf6
'maes', # 0xf7
'maess', # 0xf8
'maeng', # 0xf9
'maej', # 0xfa
'maec', # 0xfb
'maek', # 0xfc
'maet', # 0xfd
'maep', # 0xfe
'maeh', # 0xff
)
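# Note added for clarity (not in the original source): this tuple maps the 256 code
# points U+B900..U+B9FF (precomposed Hangul syllables) to ASCII romanizations; the
# unidecode package looks entries up by the low byte of the code point, e.g. U+B9C8 -> 'ma'.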
| gpl-2.0 |
sosolimited/Cinder | docs/libs/bs4/builder/_html5lib.py | 423 | 10647 | __all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, basestring) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
text = TextNode(self.soup.new_string(data), self.soup)
self.insertBefore(data, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent.element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
first_child.previous_element = new_parents_last_descendant
first_child.previous_sibling = new_parents_last_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
| bsd-2-clause |
darkleons/BE | addons/hr_timesheet_sheet/hr_timesheet_sheet.py | 35 | 34024 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = {}
for sheet in self.browse(cr, uid, ids, context=context or {}):
res.setdefault(sheet.id, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
for period in sheet.period_ids:
res[sheet.id]['total_attendance'] += period.total_attendance
res[sheet.id]['total_timesheet'] += period.total_timesheet
res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
            raise osv.except_osv(_('Warning!'), _('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
# In addition to the date order, deleting attendances are done before inserting attendances
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
if att_tuple[2] and att_tuple[2].has_key('name'):
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
sheet.signal_workflow('confirm')
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Timesheet = self.pool['hr.analytic.timesheet']
Attendance = self.pool['hr.attendance']
return {
sheet_id: {
'timesheet_activity_count': Timesheet.search_count(cr,uid, [('sheet_id','=', sheet_id)], context=context),
'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context)
}
for sheet_id in ids
}
_columns = {
'name': fields.char('Note', select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
            help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used when the timesheet is confirmed by the user. \
                \n* The \'Done\' status is used when the user\'s timesheet is accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return time.strftime('%Y-%m-%d')
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return time.strftime('%Y-%m-%d')
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
elif sheet['total_attendance'] <> 0.00:
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which have attendance entries.'))
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the date start of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
def name_create(self, cr, uid, name, context=None):
if context is None:
context = {}
group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required')
if not context.get('default_use_timesheets') or group_template_required:
return super(account_analytic_account, self).name_create(cr, uid, name, context=context)
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id)],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def _check_sheet_state(self, cr, uid, ids, context=None):
if context is None:
context = {}
for timesheet_line in self.browse(cr, uid, ids, context=context):
if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'):
return False
return True
_constraints = [
(_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet !', ['state']),
]
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
WHERE %(date_to)s >= date_trunc('day', a.name)
AND %(date_from)s <= a.name
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
""" Simulate timesheet in employee timezone
Return the attendance date in string format in the employee
tz converted from utc timezone as we consider date of employee
timesheet is in employee timezone
"""
employee_obj = self.pool['hr.employee']
tz = False
if employee_id:
employee = employee_obj.browse(cr, uid, employee_id, context=context)
tz = employee.user_id.partner_id.tz
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz = timezone(tz or 'utc')
attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_dt = pytz.utc.localize(attendance_dt)
att_tz_dt = att_tz_dt.astimezone(att_tz)
        # We keep only the date, omitting the hours, because we compare against
        # the timesheet date_from, which is a date field; keeping the hours
        # would push the value outside the timesheet's scope
att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
return att_tz_date_str
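    # Illustrative example (values assumed): with employee tz 'Europe/Brussels'
    # and a UTC attendance of '2014-03-10 23:30:00', the localized value is
    # '2014-03-11 00:30:00+01:00', so the returned date string is '2014-03-11'.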
def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
sheet_obj = self.pool['hr_timesheet_sheet.sheet']
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, employee_id,
date=date, context=context)
sheet_ids = sheet_obj.search(cr, uid,
[('date_from', '<=', att_tz_date_str),
('date_to', '>=', att_tz_date_str),
('employee_id', '=', employee_id)],
limit=1, context=context)
return sheet_ids and sheet_ids[0] or False
def _sheet(self, cursor, user, ids, name, args, context=None):
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
res[attendance.id] = self._get_current_sheet(
cursor, user, attendance.employee_id.id, attendance.name,
context=context)
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
if sheet_id:
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, vals.get('employee_id'),
date=vals.get('name'), context=context)
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
if ts.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.'))
return super(hr_attendance,self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
_depends = {
'account.analytic.line': ['date', 'unit_amount'],
'hr.analytic.timesheet': ['line_id', 'sheet_id'],
'hr.attendance': ['action', 'name', 'sheet_id'],
}
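    # The view below aggregates, per day and per sheet, the timesheet hours
    # (sum of analytic line unit_amount) and the attendance hours (sign_in rows
    # count negative minutes, sign_out rows positive, divided by 60); a day with
    # an open sign_in is closed with the current UTC time if it is today,
    # otherwise with 1440 minutes (end of day).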
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(total_attendance) < 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC')
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
group by l.date::date, s.id
) union (
select
-min(a.id) as id,
a.name::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
WHERE action in ('sign_in', 'sign_out')
group by a.name::date, s.id
)) AS foo
GROUP BY name, sheet_id
)) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
_depends = {
'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'],
'hr.analytic.timesheet': ['line_id'],
'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
}
def init(self, cr):
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
class hr_employee(osv.osv):
'''
Employee
'''
_inherit = 'hr.employee'
_description = 'Employee'
def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
Sheet = self.pool['hr_timesheet_sheet.sheet']
return {
employee_id: Sheet.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dreikanter/public-static | publicstatic/conf.py | 1 | 6519 | # coding: utf-8
"""Configuration-related fuctionality and defaults."""
import codecs
from datetime import datetime
import os
import yaml
from publicstatic import const
from publicstatic import errors
from publicstatic.version import __version__
_params = {} # Configuration parameters
_path = '' # Configuration file absolute path
class NotFoundException(errors.BasicException):
"""configuration file not found"""
pass
class ParsingError(errors.BasicException):
"""error reading configuration file"""
pass
class ConfigurationExistsException(errors.BasicException):
"""configuration file already exists; use --force to overwrite"""
pass
class NotInitializedException(errors.BasicException):
"""configuration was not initialized"""
pass
def path():
if not _path:
raise NotInitializedException()
return _path
def defaults():
"""Returns default configuration."""
return {key: value['value'] for key, value in const.DEFAULTS.items()}
def load(conf_path):
"""Initializes configuration."""
global _path
_path = find_conf(conf_path or '.')
if not _path:
raise NotFoundException()
try:
with codecs.open(_path, mode='r', encoding='utf-8') as f:
loaded = yaml.load(f.read())
except (IOError, OSError, yaml.scanner.ScannerError) as ex:
raise ParsingError(error=str(ex)) from ex
global _params
_params = defaults()
_params.update(dict((item, loaded[item]) for item in loaded))
_params = _purify(_params)
def generate(conf_path, force):
"""Generates new configuration file using defaults."""
global _path
_path = os.path.join(os.path.abspath(conf_path), const.CONF_NAME)
if not force and os.path.exists(_path):
raise ConfigurationExistsException(path=_path)
dir_path = os.path.dirname(path())
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
header = "# %s\n\n" % const.CONF_HEADER
exports = [opt for opt in const.DEFAULTS.keys() if opt in const.EXPORTS]
text = '\n'.join([_dumpopt(opt) for opt in exports])
with codecs.open(_path, mode='w', encoding='utf-8') as f:
f.write(header + text)
global _params
_params = _purify(defaults())
def find_conf(conf_path):
"""Walks from the specified directory path up to the root until
configuration file will be found. Returns full configuration file path
or None if there are no one."""
seps = os.path.sep + (os.path.altsep or '')
path = os.path.abspath(conf_path).rstrip(seps)
last = True
while last:
result = os.path.join(path, const.CONF_NAME)
if os.path.exists(result):
return result
path, last = os.path.split(path)
return None
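# Illustrative walk-up (paths assumed): starting from '/home/user/site/posts',
# find_conf() checks '/home/user/site/posts/<CONF_NAME>', then
# '/home/user/site/<CONF_NAME>', and so on up to '/<CONF_NAME>', returning the
# first existing file or None.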
def get(param, default=None):
"""Returns a single configuration parameter or default value."""
try:
return _params.get(param, default)
except TypeError:
raise NotInitializedException()
def set(param, value):
"""Set or override configuration parameter."""
_params[param] = value
def tags_rel_url():
return os.path.dirname(get('rel_root_url') + get('tag_location')) + '/'
def commons():
"""Site-wide environmental parameters for page building."""
return {
'root_url': get('root_url'),
'rel_root_url': get('rel_root_url'),
'site_title': get('title'),
'site_subtitle': get('subtitle'),
'menu': get('menu'),
'time': datetime.now(),
'author': get('author'),
'author_twitter': get('author_twitter'),
'author_url': get('author_url'),
'generator': const.GENERATOR,
'generator_url': const.GENERATOR_URL,
'generator_version': __version__,
'source_url': get('source_url'),
'enable_search_form': get('enable_search_form'),
'atom_url': get('root_url') + get('atom_location'),
'archive_rel_url': get('rel_root_url') + get('archive_location'),
'tags_rel_url': tags_rel_url(),
'sitemap_url': get('rel_root_url') + 'sitemap.xml',
'author_location': get('humans_author_location'),
'language': get('humans_language'),
'doctype': get('humans_doctype'),
'ide': get('humans_ide'),
'last_updated': datetime.now(),
'disqus_id': get('disqus_id'),
'addthis_id': get('addthis_id'),
'pluso_enabled': get('pluso_enabled'),
'google_analytics_id': get('google_analytics_id'),
'datetime_format': get('datetime_format'),
'date_format': get('date_format'),
'opengraph_enabled': get('opengraph_enabled'),
'twittercards_enabled': get('twittercards_enabled'),
'site_twitter': get('site_twitter'),
}
def _dumpopt(opt_name):
"""Serializes configuration option with default value."""
desc = const.DEFAULTS[opt_name]['desc']
desc = ("# %s\n" % desc) if desc else ''
return desc + yaml.dump({
opt_name: const.DEFAULTS[opt_name]['value']
}, width=79, indent=2, default_flow_style=False)
def _purify(params):
"""Preprocess configuration parameters."""
expandables = [
'build_path',
'log_file',
]
for param in expandables:
params[param] = _expand(params[param])
urls = [
'root_url',
'rel_root_url',
'source_url',
]
for param in urls:
params[param] = _trsl(params[param].strip())
integers = [
'port',
'log_max_size',
'log_backup_cnt',
]
for param in integers:
params[param] = int(params[param])
if isinstance(params['time_format'], str):
params['time_format'] = [params['time_format']]
menu = params['menu']
for item in menu:
item['href'] = item['href'].strip() if 'href' in item else ''
item['title'] = item['title'].strip() if 'title' in item else ''
params['verbose'] = params['verbose'] or const.ENV_VERBOSE in os.environ
return params
def _expand(rel_path):
"""Expands relative path using configuration file location as base
directory. Absolute pathes will be returned as is."""
path = os.path.expandvars(os.path.expanduser(rel_path))
if not os.path.isabs(path):
base = os.path.dirname(os.path.abspath(_path))
path = os.path.join(base, path)
seps = os.path.sep + (os.path.altsep or '')
return path.rstrip(seps)
def _trsl(url):
"""Guarantees the URL have a single trailing slash."""
return url.rstrip('/') + '/'
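# Typical usage (sketch, assuming a configuration file exists in or above the
# working directory):
#
# from publicstatic import conf
# conf.load('.')                        # locate and parse the configuration file
# title = conf.get('title', 'Untitled')
# env = conf.commons()                  # site-wide template parameters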
| bsd-3-clause |
midma101/m0du1ar | .venv/lib/python2.7/site-packages/pip/vcs/mercurial.py | 280 | 4974 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip._vendor.six.moves import configparser
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
self.run_command(
['archive', location], show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'], cwd=dest)
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['clone', '--noupdate', '-q', url, dest])
self.run_command(['update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = self.run_command(
['showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
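    # The two parsers below scrape `hg tags` / `hg branches` output; e.g. a line
    # such as "v1.2    42:abc123def456" is captured as tag 'v1.2' at local
    # revision '42' (the synthetic 'tip' tag and the 'default' branch are skipped).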
def get_tag_revs(self, location):
tags = self.run_command(['tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
if "tip" != tag:
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_branch_revs(self, location):
branches = self.run_command(
['branches'], show_stdout=False, cwd=location)
branch_revs = []
for line in branches.splitlines():
branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if branches_match:
branch = branches_match.group(1)
rev = branches_match.group(2)
if "default" != branch:
branch_revs.append((rev.strip(), branch.strip()))
return dict(branch_revs)
def get_revision(self, location):
current_revision = self.run_command(
['parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = self.run_command(
['parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
current_rev_hash = self.get_revision_hash(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif current_rev in branch_revs:
# It's the tip of a branch
full_egg_name = '%s-%s' % (
egg_project_name,
branch_revs[current_rev],
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
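    # The resulting requirement looks like (illustrative URL):
    # 'hg+https://example.com/repo@<node-hash>#egg=project-1.0' when the working
    # copy sits on tag 1.0, or '#egg=project-dev' when it is on neither a tag
    # nor a branch tip.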
vcs.register(Mercurial)
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-commerce/azure/mgmt/commerce/models/recurring_charge.py | 1 | 1457 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .offer_term_info import OfferTermInfo
class RecurringCharge(OfferTermInfo):
"""Indicates a recurring charge is present for this offer.
:param effective_date: Indicates the date from which the offer term is
effective.
:type effective_date: datetime
:param name: Constant filled by server.
:type name: str
:param recurring_charge: The amount of recurring charge as per the offer
term.
:type recurring_charge: int
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'effective_date': {'key': 'EffectiveDate', 'type': 'iso-8601'},
'name': {'key': 'Name', 'type': 'str'},
'recurring_charge': {'key': 'RecurringCharge', 'type': 'int'},
}
def __init__(self, effective_date=None, recurring_charge=None):
super(RecurringCharge, self).__init__(effective_date=effective_date)
self.recurring_charge = recurring_charge
self.name = 'Recurring Charge'
| mit |
js850/PyGMIN | examples/gui/NewLJ.py | 1 | 3256 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'NewLJ.ui'
#
# Created: Thu May 10 03:10:06 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_DialogLJSetup(object):
def setupUi(self, DialogLJSetup):
DialogLJSetup.setObjectName(_fromUtf8("DialogLJSetup"))
DialogLJSetup.resize(349, 144)
DialogLJSetup.setWindowTitle(QtGui.QApplication.translate("DialogLJSetup", "Create new Lennard-Jones system", None, QtGui.QApplication.UnicodeUTF8))
DialogLJSetup.setModal(True)
self.buttonBox = QtGui.QDialogButtonBox(DialogLJSetup)
self.buttonBox.setGeometry(QtCore.QRect(20, 100, 301, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayoutWidget = QtGui.QWidget(DialogLJSetup)
self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 20, 301, 61))
self.gridLayoutWidget.setObjectName(_fromUtf8("gridLayoutWidget"))
self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.gridLayoutWidget)
self.label_2.setText(QtGui.QApplication.translate("DialogLJSetup", "Number of minima to save", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.lineNatoms = QtGui.QLineEdit(self.gridLayoutWidget)
self.lineNatoms.setInputMask(_fromUtf8(""))
self.lineNatoms.setText(QtGui.QApplication.translate("DialogLJSetup", "13", None, QtGui.QApplication.UnicodeUTF8))
self.lineNatoms.setObjectName(_fromUtf8("lineNatoms"))
self.gridLayout.addWidget(self.lineNatoms, 1, 1, 1, 1)
self.lineNsave = QtGui.QLineEdit(self.gridLayoutWidget)
self.lineNsave.setInputMask(QtGui.QApplication.translate("DialogLJSetup", "999; ", None, QtGui.QApplication.UnicodeUTF8))
self.lineNsave.setText(QtGui.QApplication.translate("DialogLJSetup", "50", None, QtGui.QApplication.UnicodeUTF8))
self.lineNsave.setObjectName(_fromUtf8("lineNsave"))
self.gridLayout.addWidget(self.lineNsave, 2, 1, 1, 1)
self.label = QtGui.QLabel(self.gridLayoutWidget)
self.label.setText(QtGui.QApplication.translate("DialogLJSetup", "Number of particles", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.retranslateUi(DialogLJSetup)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), DialogLJSetup.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), DialogLJSetup.reject)
QtCore.QMetaObject.connectSlotsByName(DialogLJSetup)
def retranslateUi(self, DialogLJSetup):
pass
| gpl-3.0 |
AlexStarov/Shop | applications/sms_ussd/migrations/0001_initial.py | 1 | 2850 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SendSMS',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sessionid', models.CharField(max_length=32, null=True, verbose_name='SessionID', blank=True)),
('task_id', models.CharField(max_length=255, null=True, verbose_name='task id', blank=True)),
('send', models.BooleanField(default=False, verbose_name='\u041e\u0442\u043f\u0440\u0430\u0432\u043b\u0435\u043d\u043e')),
('code', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='\u041a\u043e\u0434 \u043f\u0440\u043e\u0432\u0430\u0439\u0434\u0435\u0440\u0430', choices=[(39, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80 (Golden Telecom)'), (50, b'Vodafone'), (63, b'Life:)'), (66, b'Vodafone'), (67, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (68, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80 (Beeline)'), (91, b'Utel'), (92, b'PEOPLEnet'), (93, b'Life:)'), (94, b'\xd0\x98\xd0\xbd\xd1\x82\xd0\xb5\xd1\x80\xd1\x82\xd0\xb5\xd0\xbb\xd0\xb5\xd0\xba\xd0\xbe\xd0\xbc'), (95, b'Vodafone'), (96, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (97, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (98, b'\xd0\x9a\xd0\xb8\xd0\xb5\xd0\xb2\xd1\x81\xd1\x82\xd0\xb0\xd1\x80'), (99, b'Vodafone')])),
('phone', models.CharField(max_length=7, null=True, verbose_name='\u0422\u0435\u043b\u0435\u0444\u043e\u043d', blank=True)),
('message', models.TextField(null=True, verbose_name='\u0421\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u0435', blank=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430 \u0441\u043e\u0437\u0434\u0430\u043d\u0438\u044f', null=True)),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='\u0414\u0430\u0442\u0430 \u043e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f', null=True)),
('user', models.ForeignKey(verbose_name='\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ['-created_at'],
'db_table': 'SMS_USSD_SendSMS',
'verbose_name': 'SendSMS',
'verbose_name_plural': 'SendSMS',
},
),
]
| apache-2.0 |
linglung/ytdl | youtube_dl/extractor/dw.py | 84 | 4098 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
from ..compat import compat_urlparse
class DWIE(InfoExtractor):
IE_NAME = 'dw'
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)'
_TESTS = [{
# video
'url': 'http://www.dw.com/en/intelligent-light/av-19112290',
'md5': '7372046e1815c5a534b43f3c3c36e6e9',
'info_dict': {
'id': '19112290',
'ext': 'mp4',
'title': 'Intelligent light',
'description': 'md5:90e00d5881719f2a6a5827cb74985af1',
'upload_date': '20160311',
}
}, {
# audio
'url': 'http://www.dw.com/en/worldlink-my-business/av-19111941',
'md5': '2814c9a1321c3a51f8a7aeb067a360dd',
'info_dict': {
'id': '19111941',
'ext': 'mp3',
'title': 'WorldLink: My business',
'description': 'md5:bc9ca6e4e063361e21c920c53af12405',
'upload_date': '20160311',
}
}, {
# DW documentaries, only last for one or two weeks
'url': 'http://www.dw.com/en/documentaries-welcome-to-the-90s-2016-05-21/e-19220158-9798',
'md5': '56b6214ef463bfb9a3b71aeb886f3cf1',
'info_dict': {
'id': '19274438',
'ext': 'mp4',
'title': 'Welcome to the 90s – Hip Hop',
'description': 'Welcome to the 90s - The Golden Decade of Hip Hop',
'upload_date': '20160521',
},
'skip': 'Video removed',
}]
def _real_extract(self, url):
media_id = self._match_id(url)
webpage = self._download_webpage(url, media_id)
hidden_inputs = self._hidden_inputs(webpage)
title = hidden_inputs['media_title']
media_id = hidden_inputs.get('media_id') or media_id
if hidden_inputs.get('player_type') == 'video' and hidden_inputs.get('stream_file') == '1':
formats = self._extract_smil_formats(
'http://www.dw.com/smil/v-%s' % media_id, media_id,
transform_source=lambda s: s.replace(
'rtmp://tv-od.dw.de/flash/',
'http://tv-download.dw.de/dwtv_video/flv/'))
self._sort_formats(formats)
else:
formats = [{'url': hidden_inputs['file_name']}]
upload_date = hidden_inputs.get('display_date')
if not upload_date:
upload_date = self._html_search_regex(
r'<span[^>]+class="date">([0-9.]+)\s*\|', webpage,
'upload date', default=None)
upload_date = unified_strdate(upload_date)
return {
'id': media_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': hidden_inputs.get('preview_image'),
'duration': int_or_none(hidden_inputs.get('file_duration')),
'upload_date': upload_date,
'formats': formats,
}
class DWArticleIE(InfoExtractor):
IE_NAME = 'dw:article'
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
_TEST = {
'url': 'http://www.dw.com/en/no-hope-limited-options-for-refugees-in-idomeni/a-19111009',
'md5': '8ca657f9d068bbef74d6fc38b97fc869',
'info_dict': {
'id': '19105868',
'ext': 'mp4',
'title': 'The harsh life of refugees in Idomeni',
'description': 'md5:196015cc7e48ebf474db9399420043c7',
'upload_date': '20160310',
}
}
def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)
hidden_inputs = self._hidden_inputs(webpage)
media_id = hidden_inputs['media_id']
media_path = self._search_regex(r'href="([^"]+av-%s)"\s+class="overlayLink"' % media_id, webpage, 'media url')
media_url = compat_urlparse.urljoin(url, media_path)
return self.url_result(media_url, 'DW', media_id)
| unlicense |
motion2015/a3 | common/lib/xmodule/xmodule/modulestore/perf_tests/generate_report.py | 194 | 10493 |
"""
Reads the data generated by performance tests and generates a savable
report which can be viewed over time to examine the performance effects of code changes on
various parts of the system.
"""
import sqlite3
from lxml.builder import E
import lxml.html
try:
import click
except ImportError:
click = None
DB_NAME = 'block_times.db'
class HTMLTable(object):
"""
Simple wrapper for an HTML table.
"""
def __init__(self, hdr_columns):
self.table = E.TABLE()
col_headers = [E.TH(x) for x in hdr_columns]
header_row = E.TR(*col_headers)
self.table.append(header_row)
def add_row(self, items):
"""Add row to table."""
row_items = [E.TD(x) for x in items]
self.table.append(E.TR(*row_items))
def tostring(self):
"""Output table HTML as string."""
return lxml.html.tostring(self.table)
@staticmethod
def style():
""" Return a hard-coded table style."""
return E.style("""
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 5px;
}"""
) # pylint: disable=bad-continuation
class HTMLDocument(object):
"""
Simple wrapper for an entire HTML document.
"""
def __init__(self, title):
self.html = E.html(E.head(E.title(title), HTMLTable.style()))
self.body = E.body()
self.html.append(self.body)
def add_header(self, level, text):
"""Add a header to the document."""
func_name = "H{}".format(level)
self.body.append(getattr(E, func_name)(text))
def add_to_body(self, elem):
"""Add to document body."""
self.body.append(elem)
def tostring(self, pretty_print=False):
"""Output HTML document as string."""
return lxml.html.tostring(self.html, pretty_print=pretty_print)
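# Minimal usage sketch for the two HTML helpers above:
#
# doc = HTMLDocument("Results")
# table = HTMLTable(["Amount", "Duration (ms)"])
# table.add_row(["100", "12.5"])
# doc.add_header(1, "Example")
# doc.add_to_body(table.table)
# print(doc.tostring(pretty_print=True))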
class ReportGenerator(object):
"""
Base class for report generation.
"""
def __init__(self, db_name):
# Read data from all modulestore combos.
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
sel_sql = 'select id, run_id, block_desc, elapsed, timestamp FROM block_times ORDER BY run_id DESC'
cur = conn.cursor()
cur.execute(sel_sql)
self.all_rows = cur.fetchall()
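        # Each row is (id, run_id, block_desc, elapsed, timestamp); block_desc is
        # a colon-separated key such as
        # '<TestName>:<modulestore(s)>:<amount>[:<phase>[:<sort>]]', which the
        # subclasses below split apart.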
class ImportExportReportGen(ReportGenerator):
"""
Class which generates report for course import/export performance test data.
"""
def __init__(self, db_name):
super(ImportExportReportGen, self).__init__(db_name)
self._read_timing_data()
def _read_timing_data(self):
"""
Read in the timing data from the sqlite DB and save into a dict.
"""
self.run_data = {}
self.all_modulestore_combos = set()
for row in self.all_rows:
time_taken = row[3]
# Split apart the description into its parts.
desc_parts = row[2].split(':')
modulestores = desc_parts[1]
self.all_modulestore_combos.add(modulestores)
amount_md = desc_parts[2]
test_phase = 'all'
if len(desc_parts) > 3:
test_phase = desc_parts[3]
# Save the data in a multi-level dict - { phase1: { amount1: {ms1->ms2: duration, ...}, ...}, ...}.
phase_data = self.run_data.setdefault(test_phase, {})
amount_data = phase_data.setdefault(amount_md, {})
__ = amount_data.setdefault(modulestores, time_taken)
def generate_html(self):
"""
Generate HTML.
"""
html = HTMLDocument("Results")
# Output comparison of each phase to a different table.
for phase in self.run_data.keys():
if phase in ('fake_assets',):
continue
per_phase = self.run_data[phase]
html.add_header(1, phase)
title_map = {
'duration': 'Total Duration (ms)',
'ratio': 'Total Duration Per Number of Assets (ms/asset)',
'variable_cost': 'Asset Export Duration Per Number of Assets (ms/asset)'
}
for table_type in ('duration', 'ratio', 'variable_cost'):
if phase == 'all' and table_type in ('ratio', 'variable_cost'):
continue
# Make the table header columns and the table.
columns = ["Asset Metadata Amount", ]
ms_keys = sorted(self.all_modulestore_combos)
for k in ms_keys:
columns.append("{} ({})".format(k, table_type))
phase_table = HTMLTable(columns)
# Make a row for each amount of asset metadata.
for amount in sorted(per_phase.keys()):
per_amount = per_phase[amount]
num_assets = int(amount)
row = [amount, ]
for modulestore in ms_keys:
if table_type == 'duration':
value = per_amount[modulestore]
elif table_type == 'ratio':
if num_assets != 0:
value = per_amount[modulestore] / float(amount)
else:
value = 0
elif table_type == 'variable_cost':
if num_assets == 0:
value = 0
else:
value = (per_amount[modulestore] - per_phase['0'][modulestore]) / float(amount)
row.append("{}".format(value))
phase_table.add_row(row)
# Add the table title and the table.
html.add_header(2, title_map[table_type])
html.add_to_body(phase_table.table)
return html
class FindReportGen(ReportGenerator):
"""
Class which generates report for asset access performance test data.
"""
def __init__(self, db_name):
super(FindReportGen, self).__init__(db_name)
self._read_timing_data()
def _read_timing_data(self):
"""
Read in the timing data from the sqlite DB and save into a dict.
"""
self.run_data = {}
self.all_modulestores = set()
for row in self.all_rows:
time_taken = row[3]
# Split apart the description into its parts.
desc_parts = row[2].split(':')
if desc_parts[0] != 'FindAssetTest':
continue
modulestore, amount_md = desc_parts[1:3]
self.all_modulestores.add(modulestore)
test_phase = 'all'
sort = None
if len(desc_parts) >= 4:
test_phase = desc_parts[3]
if len(desc_parts) >= 5:
sort = desc_parts[4]
# Save the data in a multi-level dict:
# { phase1: { [sort1: {] amount1: { modulestore1: duration, ...}, ...}, ...}.
phase_data = self.run_data.setdefault(test_phase, {})
if test_phase == 'get_asset_list':
# Add a level here for the sort.
phase_data = phase_data.setdefault(sort, {})
amount_data = phase_data.setdefault(amount_md, {})
__ = amount_data.setdefault(modulestore, time_taken)
def generate_html(self):
"""
Generate HTML.
"""
html = HTMLDocument("Results")
# Output comparison of each phase to a different table.
# for store in self.run_data.keys():
# per_phase = self.run_data[store]
# html.add_header(1, store)
for phase in self.run_data.keys():
per_phase = self.run_data[phase]
# Make the table header columns and the table.
columns = ["Asset Metadata Amount", ]
ms_keys = sorted(self.all_modulestores)
for k in ms_keys:
columns.append("Time Taken (ms) ({})".format(k))
phase_table = HTMLTable(columns)
if phase != 'get_asset_list':
for amount in sorted(per_phase.keys()):
per_amount = per_phase[amount]
row = [amount, ]
for modulestore in ms_keys:
time_taken = per_amount[modulestore]
row.append("{}".format(time_taken))
phase_table.add_row(row)
html.add_header(2, phase)
html.add_to_body(phase_table.table)
else:
# get_asset_list phase includes the sort as well.
html.add_header(2, phase)
for sort in per_phase.keys():
sort_table = HTMLTable(columns)
per_sort = per_phase[sort]
for amount in sorted(per_sort.keys()):
per_amount = per_sort[amount]
row = [amount, ]
for modulestore in ms_keys:
# Each sort has two different ranges retrieved.
time_taken = per_amount[modulestore] / 2.0
row.append("{}".format(time_taken))
sort_table.add_row(row)
html.add_header(3, sort)
html.add_to_body(sort_table.table)
return html
if click is not None:
@click.command()
@click.argument('outfile', type=click.File('w'), default='-', required=False)
@click.option('--db_name', help='Name of sqlite database from which to read data.', default=DB_NAME)
@click.option('--data_type', help='Data type to process. One of: "imp_exp" or "find"', default="find")
def cli(outfile, db_name, data_type):
"""
Generate an HTML report from the sqlite timing data.
"""
if data_type == 'imp_exp':
ie_gen = ImportExportReportGen(db_name)
html = ie_gen.generate_html()
elif data_type == 'find':
f_gen = FindReportGen(db_name)
html = f_gen.generate_html()
click.echo(html.tostring(), file=outfile)
if __name__ == '__main__':
if click is not None:
cli() # pylint: disable=no-value-for-parameter
else:
print "Aborted! Module 'click' is not installed."
| agpl-3.0 |
EKiefer/edge-starter | py34env/Lib/site-packages/pip/_vendor/packaging/specifiers.py | 26 | 27825 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
        # or not. If we do not support prereleases, then we can short-circuit
        # the logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^\s]* # We just match everything, except for whitespace since this
# is a "legacy" specifier and the version string can be just
# about anything.
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
        # operators and, if they are, whether they include an explicit
        # prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
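# e.g. _version_split("1.0a2.dev1") == ["1", "0", "a2", "dev1"]: the numeric part
# and the pre-release marker become separate segments for prefix matching.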
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
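# e.g. _pad_version(["1", "0"], ["1", "2", "3"]) == (["1", "0", "0"], ["1", "2", "3"]):
# the shorter release segment is zero-padded so both sides compare element-wise.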
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
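    # Illustrative examples (not part of the original module) of the fallback
    # behaviour described above for an empty SpecifierSet:
    #   SpecifierSet("").filter(["1.0", "2.0a1"])    -> ["1.0"]              (prereleases dropped)
    #   SpecifierSet("").filter(["1.0a1", "1.0b2"])  -> ["1.0a1", "1.0b2"]   (only prereleases exist)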
| mit |
cctaylor/googleads-python-lib | examples/dfp/v201411/line_item_service/get_all_line_items.py | 4 | 1764 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line items.
To create line items, run create_line_items.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201411')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get line items by statement.
while True:
response = line_item_service.getLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for line_item in response['results']:
print ('Line item with id \'%s\', belonging to order id \'%s\', and '
'named \'%s\' was found.' %
(line_item['id'], line_item['orderId'], line_item['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
740521985/nw.js | tools/aws_uploader.py | 42 | 3658 | #!/usr/bin/env python
import argparse
import boto
import datetime
import json
import os
import sys
import time
# Set timeout for retry
#if not boto.config.has_section('Boto'):
# boto.config.add_section('Boto')
#boto.config.set('Boto','http_socket_timeout','30')
################################
# Parse command line args
parser = argparse.ArgumentParser(description='AWS uploader, please fill in your aws key and id in Boto config (~/.boto)')
parser.add_argument('-p','--path', help='Optional. Where to find the binaries, normally out/Release/dist', required=False)
parser.add_argument('-b','--buildername', help='Builder name, e.g. linux_32bit', required=True)
parser.add_argument('-r','--revision', help='Commit revision',required=True)
parser.add_argument('-n','--number', help='Build number', required=True)
parser.add_argument('-t','--bucket', help='AWS bucket name', required=True)
parser.add_argument('-d','--dlpath', help='AWS bucket path', required=True)
args = parser.parse_args()
################################
# Check and init variables
dist_dir = args.path
builder_name = args.buildername
got_revision = args.revision
build_number = args.number
bucket_name = args.bucket
dlpath = args.dlpath
date = datetime.date.today().strftime('%m-%d-%Y')
# If the binaries location is not given, calculate it relative to this script's directory.
if dist_dir == None:
dist_dir = os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, os.pardir, 'out', 'Release')
dist_dir = os.path.join(dist_dir, 'dist')
if not os.path.isabs(dist_dir):
dist_dir = os.path.join(os.getcwd(), dist_dir)
if not os.path.isdir(dist_dir):
print 'Invalid path: ' + dist_dir
exit(-1)
dist_dir = os.path.normpath(dist_dir)
# it's for S3, so always use '/' here
#upload_path = ''.join(['/' + date,
# '/' + builder_name + '-build-' + build_number + '-' + got_revision])
upload_path = '/' + dlpath;
file_list = os.listdir(dist_dir)
if len(file_list) == 0:
print 'Cannot find packages!'
exit(-1)
# move the nwjs package to the top of the list.
for i in range(len(file_list)):
fname = file_list[i]
if fname.startswith('nwjs-v') or fname.startswith('nwjs-sdk-v'):
del file_list[i]
file_list.insert(0,fname)
break
def print_progress(transmitted, total):
print ' %d%% transferred of total: %d bytes.' % (transmitted*100/total, total)
sys.stdout.flush()
def aws_upload(upload_path, file_list):
conn = boto.connect_s3()
print 'Connecting to S3 ...'
sys.stdout.flush()
bucket = conn.get_bucket(bucket_name)
print 'Uploading to: ' + upload_path
for f in file_list:
print 'Uploading "' + f + '" ...'
sys.stdout.flush()
# use '/' for s3
path_prefix = ''
if (f == 'nw.lib' or f == 'nw.exp') :
if builder_name != 'nw13_win64' and builder_name != 'nw13_win32' :
continue
if builder_name == 'nw13_win64' :
path_prefix = 'x64'
if f.startswith('nw-headers') and builder_name != 'nw13_mac64' :
continue
if f.startswith('chromedriver') and 'sdk' not in builder_name :
continue
key = bucket.new_key(upload_path + '/' + path_prefix + '/' + f)
key.set_contents_from_filename(filename=os.path.join(dist_dir, f), cb=print_progress, num_cb=50, replace=True)
for retry in range(3):
try:
aws_upload(upload_path, file_list)
break
except Exception, e:
print e
sys.stdout.flush()
time.sleep(30) #wait for 30s and try again.
print 'Done.'
# vim: et:ts=4:sw=4
| mit |
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/setuptools/compat.py | 456 | 2094 | import sys
import itertools
PY3 = sys.version_info >= (3,)
PY2 = not PY3
if PY2:
basestring = basestring
import __builtin__ as builtins
import ConfigParser
from StringIO import StringIO
BytesIO = StringIO
func_code = lambda o: o.func_code
func_globals = lambda o: o.func_globals
im_func = lambda o: o.im_func
from htmlentitydefs import name2codepoint
import httplib
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import BaseHTTPRequestHandler
iteritems = lambda o: o.iteritems()
long_type = long
maxsize = sys.maxint
unichr = unichr
unicode = unicode
bytes = str
from urllib import url2pathname, splittag, pathname2url
import urllib2
from urllib2 import urlopen, HTTPError, URLError, unquote, splituser
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
filterfalse = itertools.ifilterfalse
exec("""def reraise(tp, value, tb=None):
raise tp, value, tb""")
if PY3:
basestring = str
import builtins
import configparser as ConfigParser
from io import StringIO, BytesIO
func_code = lambda o: o.__code__
func_globals = lambda o: o.__globals__
im_func = lambda o: o.__func__
from html.entities import name2codepoint
import http.client as httplib
from http.server import HTTPServer, SimpleHTTPRequestHandler
from http.server import BaseHTTPRequestHandler
iteritems = lambda o: o.items()
long_type = int
maxsize = sys.maxsize
unichr = chr
unicode = str
bytes = bytes
from urllib.error import HTTPError, URLError
import urllib.request as urllib2
from urllib.request import urlopen, url2pathname, pathname2url
from urllib.parse import (
urlparse, urlunparse, unquote, splituser, urljoin, urlsplit,
urlunsplit, splittag,
)
filterfalse = itertools.filterfalse
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
| mit |
djeo94/CouchPotatoServer | couchpotato/core/notifications/pushover.py | 45 | 2903 | from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
log = CPLog(__name__)
autoload = 'Pushover'
class Pushover(Notification):
api_url = 'https://api.pushover.net'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
api_data = {
'user': self.conf('user_key'),
'token': self.conf('api_token'),
'message': toUnicode(message),
'priority': self.conf('priority'),
'sound': self.conf('sound'),
}
if data and getIdentifier(data):
api_data.update({
'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)),
'url_title': toUnicode('%s on IMDb' % getTitle(data)),
})
try:
data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'),
headers = {'Content-type': 'application/x-www-form-urlencoded'},
data = api_data)
log.info2('Pushover responded with: %s', data)
return True
except:
return False
config = [{
'name': 'pushover',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'pushover',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'user_key',
'description': 'Register on pushover.net to get one.'
},
{
'name': 'api_token',
'description': '<a href="https://pushover.net/apps/clone/couchpotato" target="_blank">Register on pushover.net</a> to get one.',
'advanced': True,
'default': 'YkxHMYDZp285L265L3IwH3LmzkTaCy',
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Lowest', -2), ('Low', -1), ('Normal', 0), ('High', 1)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
{
'name': 'sound',
'advanced': True,
'description': 'Define <a href="https://pushover.net/api%23sounds" target="_blank">custom sound</a> for Pushover alert.'
},
],
}
],
}]
| gpl-3.0 |
martenson/tools-iuc | tools/gemini/gemini_mafify.py | 16 | 9077 | import string
import sys
so_to_maf = {
'splice_acceptor_variant': 'Splice_Site',
'splice_donor_variant': 'Splice_Site',
'transcript_ablation': 'Splice_Site',
'exon_loss_variant': 'Splice_Site',
'stop_gained': 'Nonsense_Mutation',
'stop_lost': 'Nonstop_Mutation',
'frameshift_variant': 'Frame_Shift_',
'initiator_codon_variant': 'Translation_Start_Site',
'start_lost': 'Translation_Start_Site',
'inframe_insertion': 'In_Frame_Ins',
'inframe_deletion': 'In_Frame_Del',
'conservative_inframe_insertion': 'In_Frame_Ins',
'conservative_inframe_deletion': 'In_Frame_Del',
'disruptive_inframe_insertion': 'In_Frame_Ins',
'disruptive_inframe_deletion': 'In_Frame_Del',
'missense_variant': 'Missense_Mutation',
'coding_sequence_variant': 'Missense_Mutation',
'conservative_missense_variant': 'Missense_Mutation',
'rare_amino_acid_variant': 'Missense_Mutation',
'transcript_amplification': 'Intron',
'intron_variant': 'Intron',
'INTRAGENIC': 'Intron',
'intragenic_variant': 'Intron',
'splice_region_variant': 'Splice_Region',
'mature_miRNA_variant': 'RNA',
'exon_variant': 'RNA',
'non_coding_exon_variant': 'RNA',
'non_coding_transcript_exon_variant': 'RNA',
'non_coding_transcript_variant': 'RNA',
'nc_transcript_variant': 'RNA',
'stop_retained_variant': 'Silent',
'synonymous_variant': 'Silent',
'NMD_transcript_variant': 'Silent',
'incomplete_terminal_codon_variant': 'Silent',
'5_prime_UTR_variant': "5'UTR",
'5_prime_UTR_premature_start_codon_gain_variant': "5'UTR",
'3_prime_UTR_variant': "3'UTR",
'intergenic_variant': 'IGR',
'intergenic_region': 'IGR',
'regulatory_region_variant': 'IGR',
'regulatory_region': 'IGR',
'TF_binding_site_variant': 'IGR',
'upstream_gene_variant': "5'Flank",
'downstream_gene_variant': "3'Flank",
}
class VariantEffect():
def __init__(self, variant_type):
self.variant_type = variant_type.capitalize()
assert self.variant_type in ['Snp', 'Ins', 'Del']
def __getitem__(self, so_effect):
if so_effect not in so_to_maf or (
'frame' in so_effect and self.variant_type == 'Snp'
):
return 'Targeted_Region'
ret = so_to_maf[so_effect]
if ret == 'Frame_Shift_':
ret += self.variant_type
return ret
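# Illustrative examples (not part of the original script):
#   VariantEffect('del')['frameshift_variant']  -> 'Frame_Shift_Del'
#   VariantEffect('snp')['frameshift_variant']  -> 'Targeted_Region'   (a SNP cannot shift the frame)
#   VariantEffect('snp')['missense_variant']    -> 'Missense_Mutation'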
infile = sys.argv[1]
if len(sys.argv) > 2:
tumor_sample_name = sys.argv[2]
if len(sys.argv) > 3:
normal_sample_name = sys.argv[3]
start_pos_idx = None
ref_idx = None
alt_idx = None
variant_type_idx = None
variant_classification_idx = None
gt_alt_depths_idx = {}
gt_ref_depths_idx = {}
gts_idx = {}
samples = set()
required_fields = [
'Hugo_Symbol',
'NCBI_Build',
'Variant_Type',
'Variant_Classification',
'Tumor_Sample_Barcode',
'HGVSp_Short'
]
with open(infile) as data_in:
cols = data_in.readline().rstrip().split('\t')
for field in required_fields:
if field not in cols:
raise IndexError(
'Cannot generate valid MAF without the following input '
'columns: {0}.\n'
'Missing column: "{1}"'
.format(required_fields, field)
)
for i, col in enumerate(cols):
if col == 'Variant_Type':
variant_type_idx = i
elif col == 'Variant_Classification':
variant_classification_idx = i
elif col == 'Start_Position':
start_pos_idx = i
elif col == 'Reference_Allele':
ref_idx = i
elif col == 'alt':
alt_idx = i
else:
column, _, sample = col.partition('.')
if sample:
if column == 'gt_alt_depths':
gt_alt_depths_idx[sample] = i
elif column == 'gt_ref_depths':
gt_ref_depths_idx[sample] = i
elif column == 'gts':
gts_idx[sample] = i
else:
# not a recognized sample-specific column
continue
samples.add(sample)
if ref_idx is None:
raise IndexError('Input file does not have a column "Reference_Allele".')
if not tumor_sample_name:
if normal_sample_name:
raise ValueError(
'Normal sample name requires the tumor sample name to be '
'specified, too.'
)
if len(samples) > 1:
raise ValueError(
'A tumor sample name is required with more than one sample '
'in the input.'
)
if samples:
# There is a single sample with genotype data.
            # Assume it's the tumor sample.
tumor_sample_name = next(iter(samples))
else:
if tumor_sample_name not in samples:
raise ValueError(
'Could not find information about the specified tumor sample '
'in the input.'
)
if tumor_sample_name == normal_sample_name:
raise ValueError(
'Need different names for the normal and the tumor sample.'
)
if normal_sample_name and normal_sample_name not in samples:
raise ValueError(
'Could not find information about the specified normal sample '
'in the input.'
)
# All input data checks passed!
# Now extract just the relevant index numbers for the tumor/normal pair
gts_idx = (
gts_idx.get(tumor_sample_name, alt_idx),
gts_idx.get(normal_sample_name)
)
gt_alt_depths_idx = (
gt_alt_depths_idx.get(tumor_sample_name),
gt_alt_depths_idx.get(normal_sample_name)
)
gt_ref_depths_idx = (
gt_ref_depths_idx.get(tumor_sample_name),
gt_ref_depths_idx.get(normal_sample_name)
)
# Echo all MAF column names
cols_to_print = []
for n in range(len(cols)):
if n in gts_idx:
continue
if n in gt_alt_depths_idx:
continue
if n in gt_ref_depths_idx:
continue
if n != alt_idx:
cols_to_print.append(n)
print('\t'.join([cols[n] for n in cols_to_print]))
for line in data_in:
cols = line.rstrip().split('\t')
gt_alt_depths = [
int(cols[ad_idx]) if ad_idx else ''
for ad_idx in gt_alt_depths_idx
]
gt_ref_depths = [
int(cols[rd_idx]) if rd_idx else ''
for rd_idx in gt_ref_depths_idx
]
gts = [
['', ''],
['', '']
]
for n, gt_idx in enumerate(gts_idx):
if gt_idx:
gt_sep = '/' if '/' in cols[gt_idx] else '|'
allele1, _, allele2 = [
'' if allele == '.' else allele
for allele in cols[gt_idx].partition(gt_sep)
]
                # follow the cBioPortal recommendation to leave allele1 empty
                # when information is not available
if not allele2:
gts[n] = [allele2, allele1]
else:
gts[n] = [allele1, allele2]
if not gts:
gts = [['', ''], ['', '']]
if cols[variant_type_idx].lower() in ['ins', 'del']:
# transform VCF-style indel representations into MAF ones
ref_allele = cols[ref_idx]
for n, nucs in enumerate(
zip(
ref_allele,
*[allele for gt in gts for allele in gt if allele]
)
):
if any(nuc != nucs[0] for nuc in nucs[1:]):
break
else:
n += 1
if n > 0:
cols[ref_idx] = cols[ref_idx][n:] or '-'
for gt in gts:
for idx, allele in enumerate(gt):
if allele:
gt[idx] = allele[n:] or '-'
if cols[ref_idx] == '-':
n -= 1
cols[start_pos_idx] = str(int(cols[start_pos_idx]) + n)
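            # Worked example (illustrative, not from the original script): a
            # VCF-style deletion with Reference_Allele "TAG" and tumor allele
            # "T" shares one leading base, so n becomes 1 and the record is
            # rewritten as Reference_Allele "AG", tumor allele "-", and
            # Start_Position + 1, matching MAF indel conventions.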
# in-place substitution of so_effect with MAF effect
cols[variant_classification_idx] = VariantEffect(
cols[variant_type_idx]
)[cols[variant_classification_idx]]
ret_line = '\t'.join([cols[n] for n in cols_to_print])
field_formatters = {
'tumor_seq_allele1': gts[0][0],
'tumor_seq_allele2': gts[0][1],
'match_norm_seq_allele1': gts[1][0],
'match_norm_seq_allele2': gts[1][1],
't_alt_count': gt_alt_depths[0],
'n_alt_count': gt_alt_depths[1],
't_ref_count': gt_ref_depths[0],
'n_ref_count': gt_ref_depths[1],
}
print(
# use safe_substitute here to avoid key errors with column content
# looking like unknown placeholders
string.Template(ret_line).safe_substitute(field_formatters)
)
| mit |
hmen89/odoo | addons/l10n_bo/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DeeDee22/nelliepi | src/ch/fluxkompensator/nelliepi/ui/screen/ListScreen.py | 1 | 8571 | '''
Created on Oct 4, 2013
@author: geraldine
'''
from ch.fluxkompensator.nelliepi.ui.screen.ScreenWithFooter import ScreenWithFooter
from ch.fluxkompensator.nelliepi.ui.widgets.TextButton import TextButton
from ch.fluxkompensator.nelliepi.Constants import RED
from ch.fluxkompensator.nelliepi.Constants import BLUE
from ch.fluxkompensator.nelliepi.functions import ListFunction
from ch.fluxkompensator.nelliepi.functions import PlayFunction
from ch.fluxkompensator.nelliepi.music import Player
class ListScreen(ScreenWithFooter):
'''
classdocs
'''
fileList = None
directoryName = None
level = 0
parentDirectories = None
PADDING = 5
NUMBER_OF_POSSIBLE_ROWS = 8
def __init__(self):
'''
Constructor
'''
ScreenWithFooter.__init__(self, "listScreen")
def setDirectoryToList(self, pDirectoryToList, pStartIndex=0, pParentDirectories=[]):
if pParentDirectories is None :
pParentDirectories = []
print("start index is " + str(pStartIndex))
dirString = "None"
if not pDirectoryToList is None:
dirString = pDirectoryToList
print("pDirectoryToList is " + dirString)
parentDirString = "None"
if not len(pParentDirectories) == 0 :
parentDirString = pParentDirectories[len(pParentDirectories) - 1]
print("parent: " + parentDirString)
self.parentDirectories = pParentDirectories
self.directoryName = pDirectoryToList
self.fileList = self.getFileList(pDirectoryToList)
fontSize=self.getFontSize()
self.addUpButton()
self.addPrevButton(pStartIndex)
i = 0
maxIndex=self.getMaxIndex(pStartIndex, len(self.fileList))
print("max index is " + str(maxIndex))
for currentFile in self.fileList:
if i >= pStartIndex and i <= maxIndex:
if currentFile.has_key("directory"):
directoryName = self.extractFileName(currentFile["directory"])
#print("directory: " + directoryName)
height=self.getHeightForButton(self.getMaxHeight(), len(self.fileList), i, pStartIndex)
button = TextButton(10, height, ">", pColor=RED, pFontSize=fontSize, pMethod=PlayFunction.function, pParams=[currentFile["directory"]])
self.addButton(button)
button = TextButton(self.getMaxWidth() / 2, height, directoryName, pColor=RED, pFontSize=fontSize, pMethod=ListFunction.function, pParams=[currentFile["directory"], 0, self.getParentsOfChild(directoryName)])
self.addButton(button)
elif currentFile.has_key("file"):
fileName = self.extractFileName(currentFile["file"])
#print("file: " + fileName)
button = TextButton(self.getMaxWidth() / 2, self.getHeightForButton(self.getMaxHeight(), len(self.fileList), i, pStartIndex), fileName, pFontSize=fontSize, pMethod=PlayFunction.function, pParams=[currentFile["file"]])
self.addButton(button)
i=i+1
self.addNextButton(pStartIndex, maxIndex)
def extractFileName(self, fileWithPath):
index = fileWithPath.rfind("/")
return fileWithPath[index+1:]
def getHeightForPrevElement(self, pMaxHeight, pTotalNumber):
fontSize = self.getFontSize()
return fontSize / 2 + self.PADDING
def getHeightForButton(self, pMaxHeight, pTotalNumber, pIndex, pStartIndex):
relativeIndex = pIndex - pStartIndex
rowForPrevElement = 0
if pStartIndex > 0:
rowForPrevElement = 1
fontSize = self.getFontSize()
firstElement = fontSize / 2 + self.PADDING
rowHeight = fontSize + self.PADDING
return firstElement + relativeIndex * rowHeight + rowForPrevElement * rowHeight
def getFontSize(self):
return 20
def getMaxIndex(self, pStartIndex, pTotalNumber):
maxIndex=pStartIndex + self.NUMBER_OF_POSSIBLE_ROWS -1
if pTotalNumber > maxIndex + 1:
#in this case we need a "next" element
maxIndex = maxIndex -1
if pStartIndex > 0:
#in this case we need a "previous" element
maxIndex = maxIndex -1
return maxIndex
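    # Worked example (illustrative, not from the original class): with
    # NUMBER_OF_POSSIBLE_ROWS = 8, getMaxIndex(0, 20) returns 6, so rows 0-6
    # are listed and the last row holds the "next" button; the second page then
    # calls getMaxIndex(7, 20), which returns 12, leaving room for both the
    # "previous" and "next" buttons.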
def addUpButton(self):
if not len(self.parentDirectories) < 1:
button = TextButton(self.getMaxWidth() -10, self.getHeightForPrevElement(self.getMaxHeight(), len(self.fileList)), "^", pColor=BLUE, pFontSize=self.getFontSize(), pMethod=ListFunction.function, pParams=[self.getDirectParent(), 0, self.getParentsOfParents()])
self.addButton(button)
def getDirectParent(self):
if len(self.parentDirectories) < 1:
return None
return self.parentDirectories[len(self.parentDirectories) -1]
def getParentsOfChild(self, dirNameOfThisScreen):
if self.parentDirectories is None:
return [self.directoryName]
else :
result = []
for currentDir in self.parentDirectories:
result.append(currentDir)
result.append(self.directoryName)
return result
def getParentsOfParents(self):
result = []
i=0
for parent in self.parentDirectories:
if i<len(self.parentDirectories) -1:
result.append(parent)
i=i+1
return result
def addPrevButton(self, pStartIndex):
if pStartIndex > 0:
nextStartIndex = pStartIndex - self.NUMBER_OF_POSSIBLE_ROWS
#next screen definitely has a next button
nextStartIndex = nextStartIndex + 1
#does previous screen have a previous button?
if nextStartIndex > 0:
nextStartIndex = nextStartIndex + 1
if nextStartIndex < 0:
nextStartIndex = 0
print("next start index for PrevButton: " + str(nextStartIndex))
button = TextButton(self.getMaxWidth() / 2, self.getHeightForPrevElement(self.getMaxHeight(), len(self.fileList)), "<previous>", pColor=BLUE, pFontSize=self.getFontSize(), pMethod=ListFunction.function, pParams=[self.directoryName, nextStartIndex, self.parentDirectories])
self.addButton(button)
def addNextButton(self, pStartIndex,pMaxIndex):
if len(self.fileList) > pMaxIndex + 1:
nextStartIndex=pMaxIndex + 1
print("next start index forNextButton: " + str(nextStartIndex))
fontSize=self.getFontSize()
button = TextButton(self.getMaxWidth() / 2, self.getHeightForButton(self.getMaxHeight(), len(self.fileList),pMaxIndex +1, pStartIndex), "<next>", pColor=BLUE, pFontSize=fontSize, pMethod=ListFunction.function, pParams=[self.directoryName, nextStartIndex, self.parentDirectories])
self.addButton(button)
def getFileList(self, pDirectoryName):
wholeList = None
if pDirectoryName == "_main_":
wholeList = Player.listFiles()
else :
wholeList = Player.listFiles(pDirectoryName)
result = []
for currentFile in wholeList:
fileName = None
if(currentFile.has_key("directory")):
fileName=currentFile["directory"]
else:
fileName=currentFile["file"]
if self.isDirectlyInDirectory(fileName):
result.append(currentFile);
return result
def isDirectlyInDirectory(self, fileName):
level = 0
if not len(self.parentDirectories) < 1:
level=self.getDirectParent().count("/") + 2
if self.getDirectParent() == "_main_":
level = 1
occurences = fileName.count("/")
#print("=====")
#parentDirString = "None"
#if not self.parentDirectories is None:
# for currentParent in self.parentDirectories:
# parentDirString = parentDirString + currentParent + ","
#print("parent: " + parentDirString + " fileName:" + fileName + " occurences: " + str(occurences) + " level: " + str(level))
#print("=====")
return occurences == level
| gpl-2.0 |
jachitech/AndroidPrebuiltPackages | packages/libxml2-2.9.4/python/tests/relaxng.py | 35 | 1203 | #!/usr/bin/python -u
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
schema="""<?xml version="1.0"?>
<element name="foo"
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/annotation/1.0"
xmlns:ex1="http://www.example.com/n1"
xmlns:ex2="http://www.example.com/n2">
<a:documentation>A foo element.</a:documentation>
<element name="ex1:bar1">
<empty/>
</element>
<element name="ex2:bar2">
<empty/>
</element>
</element>
"""
instance="""<?xml version="1.0"?>
<foo><pre1:bar1 xmlns:pre1="http://www.example.com/n1"/><pre2:bar2 xmlns:pre2="http://www.example.com/n2"/></foo>"""
rngp = libxml2.relaxNGNewMemParserCtxt(schema, len(schema))
rngs = rngp.relaxNGParse()
ctxt = rngs.relaxNGNewValidCtxt()
doc = libxml2.parseDoc(instance)
ret = doc.relaxNGValidateDoc(ctxt)
if ret != 0:
print("error doing RelaxNG validation")
sys.exit(1)
doc.freeDoc()
del rngp
del rngs
del ctxt
libxml2.relaxNGCleanupTypes()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| apache-2.0 |
ran5515/DeepDecision | tensorflow/user_ops/invalid_op_test.py | 146 | 1217 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom user ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
class InvalidOpTest(tf.test.TestCase):
def testBasic(self):
library_filename = os.path.join(tf.resource_loader.get_data_files_path(),
'invalid_op.so')
with self.assertRaises(tf.errors.InvalidArgumentError):
tf.load_op_library(library_filename)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_inst/padova_inst_6/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
| gpl-2.0 |
ict-felix/stack | vt_manager/src/python/vt_manager/communication/sfa/managers/AggregateManager.py | 4 | 2850 | from vt_manager.communication.sfa.util.version import version_core
from vt_manager.communication.sfa.util.xrn import Xrn
from vt_manager.communication.sfa.util.callids import Callids
from vt_manager.communication.sfa.drivers.VTSfaDriver import VTSfaDriver
from vt_manager.communication.sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
RecordNotFound, SfaNotImplemented, SliverDoesNotExist
import zlib
class AggregateManager:
''' SFA AM Class for VM_Manager'''
def __init__ (self, config=None):
self.driver = VTSfaDriver(None)
def ListSlices(self, api, creds, options):
raise Exception("External authorities do not have permissions to list OCF slices")
def ListResources(self, options):
slice_xrn = options.get('geni_slice_urn', None)
if slice_xrn:
xrn = Xrn(slice_xrn,'slice')
slice_leaf = xrn.get_leaf()
options['slice'] = slice_leaf
rspec = self.driver.list_resources(options)
if options.has_key('geni_compressed') and options['geni_compressed'] == True:
rspec = zlib.compress(rspec).encode('base64')
return rspec
def SliverStatus (self, xrn, options):
xrn = Xrn(xrn,'slice')
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.sliver_status(slice_leaf,authority,options)
def CreateSliver(self,xrn,rspec_string,users,creds,options):
xrn = Xrn(xrn, 'slice')
slice_leaf = xrn.get_leaf()
slice_hrn = xrn.get_hrn()
authority = xrn.get_authority_hrn()
expiration_date = self.driver.get_expiration(creds, slice_hrn)
return self.driver.create_sliver (slice_leaf,authority,rspec_string, users, options, expiration_date)
def DeleteSliver(self, xrn, options):
#TODO: Check the options or xrn to get a single vm.
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice(slice_leaf,authority,action='delete_slice')
def RenewSliver(self, xrn, expiration_time, options):
return True
def start_slice(self,xrn):
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice(slice_leaf,authority,action='start_slice')
def stop_slice(self,xrn):
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice (slice_leaf,authority,action='stop_slice')
def reset_slice(self,xrn):
xrn = Xrn(xrn)
slice_leaf = xrn.get_leaf()
authority = xrn.get_authority_hrn()
return self.driver.crud_slice (slice_leaf,authority,action='reset_slice')
def GetTicket(self, api, xrn, creds, rspec, users, options):
# ticket is dead.
raise SfaNotImplemented('Method GetTicket was deprecated.')
| apache-2.0 |
AntonPalich/sublime-evernote | lib/pygments/lexers/_robotframeworklexer.py | 57 | 18610 | # -*- coding: utf-8 -*-
"""
pygments.lexers._robotframeworklexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Robot Framework.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Copyright 2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pygments.lexer import Lexer
from pygments.token import Token
from pygments.util import text_type
HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error
def normalize(string, remove=''):
string = string.lower()
for char in remove + ' ':
if char in string:
string = string.replace(char, '')
return string
class RobotFrameworkLexer(Lexer):
"""
For `Robot Framework <http://robotframework.org>`_ test data.
Supports both space and pipe separated plain text formats.
.. versionadded:: 1.6
"""
name = 'RobotFramework'
aliases = ['robotframework']
filenames = ['*.txt', '*.robot']
mimetypes = ['text/x-robotframework']
def __init__(self, **options):
options['tabsize'] = 2
options['encoding'] = 'UTF-8'
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
row_tokenizer = RowTokenizer()
var_tokenizer = VariableTokenizer()
index = 0
for row in text.splitlines():
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
yield index, token, text_type(value)
index += len(value)
class VariableTokenizer(object):
def tokenize(self, string, token):
var = VariableSplitter(string, identifiers='$@%')
if var.start < 0 or token in (COMMENT, ERROR):
yield string, token
return
for value, token in self._tokenize(var, string, token):
if value:
yield value, token
def _tokenize(self, var, string, orig_token):
before = string[:var.start]
yield before, orig_token
yield var.identifier + '{', SYNTAX
for value, token in self.tokenize(var.base, VARIABLE):
yield value, token
yield '}', SYNTAX
if var.index:
yield '[', SYNTAX
for value, token in self.tokenize(var.index, VARIABLE):
yield value, token
yield ']', SYNTAX
for value, token in self.tokenize(string[var.end:], orig_token):
yield value, token
class RowTokenizer(object):
def __init__(self):
self._table = UnknownTable()
self._splitter = RowSplitter()
testcases = TestCaseTable()
settings = SettingTable(testcases.set_default_template)
variables = VariableTable()
keywords = KeywordTable()
self._tables = {'settings': settings, 'setting': settings,
'metadata': settings,
'variables': variables, 'variable': variables,
'testcases': testcases, 'testcase': testcases,
'keywords': keywords, 'keyword': keywords,
'userkeywords': keywords, 'userkeyword': keywords}
def tokenize(self, row):
commented = False
heading = False
for index, value in enumerate(self._splitter.split(row)):
# First value, and every second after that, is a separator.
index, separator = divmod(index-1, 2)
if value.startswith('#'):
commented = True
elif index == 0 and value.startswith('*'):
self._table = self._start_table(value)
heading = True
for value, token in self._tokenize(value, index, commented,
separator, heading):
yield value, token
self._table.end_row()
def _start_table(self, header):
name = normalize(header, remove='*')
return self._tables.get(name, UnknownTable())
def _tokenize(self, value, index, commented, separator, heading):
if commented:
yield value, COMMENT
elif separator:
yield value, SEPARATOR
elif heading:
yield value, HEADING
else:
for value, token in self._table.tokenize(value, index):
yield value, token
class RowSplitter(object):
_space_splitter = re.compile('( {2,})')
_pipe_splitter = re.compile('((?:^| +)\|(?: +|$))')
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
or self._split_from_spaces)
for value in splitter(row):
yield value
yield '\n'
def _split_from_spaces(self, row):
yield '' # Start with (pseudo)separator similarly as with pipes
for value in self._space_splitter.split(row):
yield value
def _split_from_pipes(self, row):
_, separator, rest = self._pipe_splitter.split(row, 1)
yield separator
while self._pipe_splitter.search(rest):
cell, separator, rest = self._pipe_splitter.split(rest, 1)
yield cell
yield separator
yield rest
class Tokenizer(object):
_tokens = None
def __init__(self):
self._index = 0
def tokenize(self, value):
values_and_tokens = self._tokenize(value, self._index)
self._index += 1
if isinstance(values_and_tokens, type(Token)):
values_and_tokens = [(value, values_and_tokens)]
return values_and_tokens
def _tokenize(self, value, index):
index = min(index, len(self._tokens) - 1)
return self._tokens[index]
def _is_assign(self, value):
if value.endswith('='):
value = value[:-1].strip()
var = VariableSplitter(value, identifiers='$@')
return var.start == 0 and var.end == len(value)
class Comment(Tokenizer):
_tokens = (COMMENT,)
class Setting(Tokenizer):
_tokens = (SETTING, ARGUMENT)
_keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
'suitepostcondition', 'testsetup', 'testprecondition',
'testteardown', 'testpostcondition', 'testtemplate')
_import_settings = ('library', 'resource', 'variables')
_other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
'testtimeout')
_custom_tokenizer = None
def __init__(self, template_setter=None):
Tokenizer.__init__(self)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 1 and self._template_setter:
self._template_setter(value)
if index == 0:
normalized = normalize(value)
if normalized in self._keyword_settings:
self._custom_tokenizer = KeywordCall(support_assign=False)
elif normalized in self._import_settings:
self._custom_tokenizer = ImportSetting()
elif normalized not in self._other_settings:
return ERROR
elif self._custom_tokenizer:
return self._custom_tokenizer.tokenize(value)
return Tokenizer._tokenize(self, value, index)
class ImportSetting(Tokenizer):
_tokens = (IMPORT, ARGUMENT)
class TestCaseSetting(Setting):
_keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
'template')
_import_settings = ()
_other_settings = ('documentation', 'tags', 'timeout')
def _tokenize(self, value, index):
if index == 0:
type = Setting._tokenize(self, value[1:-1], index)
return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
return Setting._tokenize(self, value, index)
class KeywordSetting(TestCaseSetting):
_keyword_settings = ('teardown',)
_other_settings = ('documentation', 'arguments', 'return', 'timeout')
class Variable(Tokenizer):
_tokens = (SYNTAX, ARGUMENT)
def _tokenize(self, value, index):
if index == 0 and not self._is_assign(value):
return ERROR
return Tokenizer._tokenize(self, value, index)
class KeywordCall(Tokenizer):
_tokens = (KEYWORD, ARGUMENT)
def __init__(self, support_assign=True):
Tokenizer.__init__(self)
self._keyword_found = not support_assign
self._assigns = 0
def _tokenize(self, value, index):
if not self._keyword_found and self._is_assign(value):
self._assigns += 1
return SYNTAX # VariableTokenizer tokenizes this later.
if self._keyword_found:
return Tokenizer._tokenize(self, value, index - self._assigns)
self._keyword_found = True
return GherkinTokenizer().tokenize(value, KEYWORD)
class GherkinTokenizer(object):
_gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE)
def tokenize(self, value, token):
match = self._gherkin_prefix.match(value)
if not match:
return [(value, token)]
end = match.end()
return [(value[:end], GHERKIN), (value[end:], token)]
class TemplatedKeywordCall(Tokenizer):
_tokens = (ARGUMENT,)
class ForLoop(Tokenizer):
def __init__(self):
Tokenizer.__init__(self)
self._in_arguments = False
def _tokenize(self, value, index):
token = self._in_arguments and ARGUMENT or SYNTAX
if value.upper() in ('IN', 'IN RANGE'):
self._in_arguments = True
return token
class _Table(object):
_tokenizer_class = None
def __init__(self, prev_tokenizer=None):
self._tokenizer = self._tokenizer_class()
self._prev_tokenizer = prev_tokenizer
self._prev_values_on_row = []
def tokenize(self, value, index):
if self._continues(value, index):
self._tokenizer = self._prev_tokenizer
yield value, SYNTAX
else:
for value_and_token in self._tokenize(value, index):
yield value_and_token
self._prev_values_on_row.append(value)
def _continues(self, value, index):
return value == '...' and all(self._is_empty(t)
for t in self._prev_values_on_row)
def _is_empty(self, value):
return value in ('', '\\')
def _tokenize(self, value, index):
return self._tokenizer.tokenize(value)
def end_row(self):
self.__init__(prev_tokenizer=self._tokenizer)
class UnknownTable(_Table):
_tokenizer_class = Comment
def _continues(self, value, index):
return False
class VariableTable(_Table):
_tokenizer_class = Variable
class SettingTable(_Table):
_tokenizer_class = Setting
def __init__(self, template_setter, prev_tokenizer=None):
_Table.__init__(self, prev_tokenizer)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 0 and normalize(value) == 'testtemplate':
self._tokenizer = Setting(self._template_setter)
return _Table._tokenize(self, value, index)
def end_row(self):
self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
class TestCaseTable(_Table):
_setting_class = TestCaseSetting
_test_template = None
_default_template = None
@property
def _tokenizer_class(self):
if self._test_template or (self._default_template and
self._test_template is not False):
return TemplatedKeywordCall
return KeywordCall
def _continues(self, value, index):
return index > 0 and _Table._continues(self, value, index)
def _tokenize(self, value, index):
if index == 0:
if value:
self._test_template = None
return GherkinTokenizer().tokenize(value, TC_KW_NAME)
if index == 1 and self._is_setting(value):
if self._is_template(value):
self._test_template = False
self._tokenizer = self._setting_class(self.set_test_template)
else:
self._tokenizer = self._setting_class()
if index == 1 and self._is_for_loop(value):
self._tokenizer = ForLoop()
if index == 1 and self._is_empty(value):
return [(value, SYNTAX)]
return _Table._tokenize(self, value, index)
def _is_setting(self, value):
return value.startswith('[') and value.endswith(']')
def _is_template(self, value):
return normalize(value) == '[template]'
def _is_for_loop(self, value):
return value.startswith(':') and normalize(value, remove=':') == 'for'
def set_test_template(self, template):
self._test_template = self._is_template_set(template)
def set_default_template(self, template):
self._default_template = self._is_template_set(template)
def _is_template_set(self, template):
return normalize(template) not in ('', '\\', 'none', '${empty}')
class KeywordTable(TestCaseTable):
_tokenizer_class = KeywordCall
_setting_class = KeywordSetting
def _is_template(self, value):
return False
# Following code copied directly from Robot Framework 2.7.5.
class VariableSplitter:
def __init__(self, string, identifiers):
self.identifier = None
self.base = None
self.index = None
self.start = -1
self.end = -1
self._identifiers = identifiers
self._may_have_internal_variables = False
try:
self._split(string)
except ValueError:
pass
else:
self._finalize()
def get_replaced_base(self, variables):
if self._may_have_internal_variables:
return variables.replace_string(self.base)
return self.base
def _finalize(self):
self.identifier = self._variable_chars[0]
self.base = ''.join(self._variable_chars[2:-1])
self.end = self.start + len(self._variable_chars)
if self._has_list_variable_index():
self.index = ''.join(self._list_variable_index_chars[1:-1])
self.end += len(self._list_variable_index_chars)
def _has_list_variable_index(self):
return self._list_variable_index_chars\
and self._list_variable_index_chars[-1] == ']'
def _split(self, string):
start_index, max_index = self._find_variable(string)
self.start = start_index
self._open_curly = 1
self._state = self._variable_state
self._variable_chars = [string[start_index], '{']
self._list_variable_index_chars = []
self._string = string
start_index += 2
for index, char in enumerate(string[start_index:]):
index += start_index # Giving start to enumerate only in Py 2.6+
try:
self._state(char, index)
except StopIteration:
return
if index == max_index and not self._scanning_list_variable_index():
return
def _scanning_list_variable_index(self):
return self._state in [self._waiting_list_variable_index_state,
self._list_variable_index_state]
def _find_variable(self, string):
max_end_index = string.rfind('}')
if max_end_index == -1:
raise ValueError('No variable end found')
if self._is_escaped(string, max_end_index):
return self._find_variable(string[:max_end_index])
start_index = self._find_start_index(string, 1, max_end_index)
if start_index == -1:
raise ValueError('No variable start found')
return start_index, max_end_index
def _find_start_index(self, string, start, end):
index = string.find('{', start, end) - 1
if index < 0:
return -1
if self._start_index_is_ok(string, index):
return index
return self._find_start_index(string, index+2, end)
def _start_index_is_ok(self, string, index):
return string[index] in self._identifiers\
and not self._is_escaped(string, index)
def _is_escaped(self, string, index):
escaped = False
while index > 0 and string[index-1] == '\\':
index -= 1
escaped = not escaped
return escaped
def _variable_state(self, char, index):
self._variable_chars.append(char)
if char == '}' and not self._is_escaped(self._string, index):
self._open_curly -= 1
if self._open_curly == 0:
if not self._is_list_variable():
raise StopIteration
self._state = self._waiting_list_variable_index_state
elif char in self._identifiers:
self._state = self._internal_variable_start_state
def _is_list_variable(self):
return self._variable_chars[0] == '@'
def _internal_variable_start_state(self, char, index):
self._state = self._variable_state
if char == '{':
self._variable_chars.append(char)
self._open_curly += 1
self._may_have_internal_variables = True
else:
self._variable_state(char, index)
def _waiting_list_variable_index_state(self, char, index):
if char != '[':
raise StopIteration
self._list_variable_index_chars.append(char)
self._state = self._list_variable_index_state
def _list_variable_index_state(self, char, index):
self._list_variable_index_chars.append(char)
if char == ']':
raise StopIteration
| mit |
andfoy/margffoy-tuay-server | env/lib/python2.7/site-packages/django/template/response.py | 61 | 8910 | import warnings
from django.http import HttpResponse
from django.template import Context, RequestContext, Template, loader
from django.template.backends.django import Template as BackendTemplate
from django.template.context import _current_app_undefined
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
class ContentNotRenderedError(Exception):
pass
class SimpleTemplateResponse(HttpResponse):
rendering_attrs = ['template_name', 'context_data', '_post_render_callbacks']
def __init__(self, template, context=None, content_type=None, status=None,
charset=None, using=None):
if isinstance(template, Template):
warnings.warn(
"{}'s template argument cannot be a django.template.Template "
"anymore. It may be a backend-specific template like those "
"created by get_template().".format(self.__class__.__name__),
RemovedInDjango20Warning, stacklevel=2)
template = BackendTemplate(template)
# It would seem obvious to call these next two members 'template' and
# 'context', but those names are reserved as part of the test Client
# API. To avoid the name collision, we use different names.
self.template_name = template
self.context_data = context
self.using = using
self._post_render_callbacks = []
# _request stores the current request object in subclasses that know
# about requests, like TemplateResponse. It's defined in the base class
# to minimize code duplication.
# It's called self._request because self.request gets overwritten by
# django.test.client.Client. Unlike template_name and context_data,
# _request should not be considered part of the public API.
self._request = None
# content argument doesn't make sense here because it will be replaced
# with rendered template so we always pass empty string in order to
# prevent errors and provide shorter signature.
super(SimpleTemplateResponse, self).__init__('', content_type, status, charset)
# _is_rendered tracks whether the template and context has been baked
# into a final response.
# Super __init__ doesn't know any better than to set self.content to
# the empty string we just gave it, which wrongly sets _is_rendered
# True, so we initialize it to False after the call to super __init__.
self._is_rendered = False
def __getstate__(self):
"""Pickling support function.
Ensures that the object can't be pickled before it has been
rendered, and that the pickled state only includes rendered
data, not the data used to construct the response.
"""
obj_dict = self.__dict__.copy()
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be pickled.')
for attr in self.rendering_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def resolve_template(self, template):
"Accepts a template object, path-to-template or list of paths"
if isinstance(template, (list, tuple)):
return loader.select_template(template, using=self.using)
elif isinstance(template, six.string_types):
return loader.get_template(template, using=self.using)
else:
return template
def _resolve_template(self, template):
# This wrapper deprecates returning a django.template.Template in
# subclasses that override resolve_template. It can be removed in
# Django 2.0.
new_template = self.resolve_template(template)
if isinstance(new_template, Template):
warnings.warn(
"{}.resolve_template() must return a backend-specific "
"template like those created by get_template(), not a "
"{}.".format(
self.__class__.__name__, new_template.__class__.__name__),
RemovedInDjango20Warning, stacklevel=2)
new_template = BackendTemplate(new_template)
return new_template
def resolve_context(self, context):
return context
def _resolve_context(self, context):
# This wrapper deprecates returning a Context or a RequestContext in
# subclasses that override resolve_context. It can be removed in
# Django 2.0. If returning a Context or a RequestContext works by
# accident, it won't be an issue per se, but it won't be officially
# supported either.
new_context = self.resolve_context(context)
if isinstance(new_context, RequestContext) and self._request is None:
self._request = new_context.request
if isinstance(new_context, Context):
warnings.warn(
"{}.resolve_context() must return a dict, not a {}.".format(
self.__class__.__name__, new_context.__class__.__name__),
RemovedInDjango20Warning, stacklevel=2)
# It would be tempting to do new_context = new_context.flatten()
# here but that would cause template context processors to run for
# TemplateResponse(request, template, Context({})), which would be
# backwards-incompatible. As a consequence another deprecation
# warning will be raised when rendering the template. There isn't
# much we can do about that.
return new_context
@property
def rendered_content(self):
"""Returns the freshly rendered content for the template and context
described by the TemplateResponse.
This *does not* set the final content of the response. To set the
response content, you must either call render(), or set the
content explicitly using the value of this property.
"""
template = self._resolve_template(self.template_name)
context = self._resolve_context(self.context_data)
content = template.render(context, self._request)
return content
def add_post_render_callback(self, callback):
"""Adds a new post-rendering callback.
If the response has already been rendered,
invoke the callback immediately.
"""
if self._is_rendered:
callback(self)
else:
self._post_render_callbacks.append(callback)
def render(self):
"""Renders (thereby finalizing) the content of the response.
If the content has already been rendered, this is a no-op.
Returns the baked response instance.
"""
retval = self
if not self._is_rendered:
self.content = self.rendered_content
for post_callback in self._post_render_callbacks:
newretval = post_callback(retval)
if newretval is not None:
retval = newretval
return retval
@property
def is_rendered(self):
return self._is_rendered
def __iter__(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be iterated over.')
return super(SimpleTemplateResponse, self).__iter__()
@property
def content(self):
if not self._is_rendered:
raise ContentNotRenderedError('The response content must be '
'rendered before it can be accessed.')
return super(SimpleTemplateResponse, self).content
@content.setter
def content(self, value):
"""Sets the content for the response
"""
HttpResponse.content.fset(self, value)
self._is_rendered = True
class TemplateResponse(SimpleTemplateResponse):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_request', '_current_app']
def __init__(self, request, template, context=None, content_type=None,
status=None, current_app=_current_app_undefined, charset=None,
using=None):
        # As a convenience we'll allow callers to provide current_app without
        # having to create the RequestContext directly.
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of TemplateResponse is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango20Warning, stacklevel=2)
request.current_app = current_app
super(TemplateResponse, self).__init__(
template, context, content_type, status, charset, using)
self._request = request
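# --- Illustrative usage sketch (not part of the original module) -------------
# A view returning a TemplateResponse keeps the context mutable until render()
# is called, so decorators and middleware can still adjust it. The template
# path and context keys below are hypothetical.
def _example_profile_view(request):
    context = {'section': 'profile'}
    response = TemplateResponse(request, 'accounts/profile.html', context)
    # callers may edit response.context_data here; rendering happens later,
    # either explicitly via response.render() or in the response middleware
    return response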
| gpl-2.0 |
DanielSlater/PyGamePlayer | games/mini_pong.py | 2 | 5040 | # Modified from http://www.pygame.org/project-Very+simple+Pong+game-816-.html
import pygame
from pygame.locals import *
bar1_score, bar2_score = 0, 0
def run(screen_width=40., screen_height=40.):
global bar1_score, bar2_score
pygame.init()
bar_width, bar_height = screen_width / 32., screen_height / 9.6
bar_dist_from_edge = screen_width / 64.
circle_diameter = screen_height / 16.
circle_radius = circle_diameter / 2.
bar_1_start_x, bar_2_start_x = bar_dist_from_edge, screen_width - bar_dist_from_edge - bar_width
bar_start_y = (screen_height - bar_height) / 2.
bar_max_y = screen_height - bar_height - bar_dist_from_edge
circle_start_x, circle_start_y = (screen_width - circle_diameter) / 2., (screen_width - circle_diameter) / 2.
screen = pygame.display.set_mode((int(screen_width), int(screen_height)), 0, 32)
# Creating 2 bars, a ball and background.
back = pygame.Surface((int(screen_width), int(screen_height)))
background = back.convert()
background.fill((0, 0, 0))
bar = pygame.Surface((int(bar_width), int(bar_height)))
bar1 = bar.convert()
bar1.fill((255, 255, 255))
bar2 = bar.convert()
bar2.fill((255, 255, 255))
circle_surface = pygame.Surface((int(circle_diameter), int(circle_diameter)))
pygame.draw.circle(circle_surface, (255, 255, 255), (int(circle_radius), int(circle_radius)), int(circle_radius))
circle = circle_surface.convert()
circle.set_colorkey((0, 0, 0))
# some definitions
bar1_x, bar2_x = bar_1_start_x, bar_2_start_x
bar1_y, bar2_y = bar_start_y, bar_start_y
circle_x, circle_y = circle_start_x, circle_start_y
    bar1_move, bar2_move = 0., 0.
    speed_x, speed_y, speed_circle = screen_width / 2.56, screen_height / 1.92, screen_width / 2.56  # 250., 250., 250.
    ai_speed = 0.  # recomputed every frame below; initialised so a key press on the first frame can't hit an undefined name
clock = pygame.time.Clock()
done = False
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
if event.type == KEYDOWN:
if event.key == K_UP:
bar1_move = -ai_speed
elif event.key == K_DOWN:
bar1_move = ai_speed
elif event.type == KEYUP:
if event.key == K_UP:
bar1_move = 0.
elif event.key == K_DOWN:
bar1_move = 0.
screen.blit(background, (0, 0))
screen.blit(bar1, (bar1_x, bar1_y))
screen.blit(bar2, (bar2_x, bar2_y))
screen.blit(circle, (circle_x, circle_y))
bar1_y += bar1_move
# movement of circle
time_passed = clock.tick(30)
time_sec = time_passed / 1000.0
circle_x += speed_x * time_sec
circle_y += speed_y * time_sec
ai_speed = speed_circle * time_sec
# AI of the computer.
if circle_x >= (screen_width / 2.) - circle_diameter:
if not bar2_y == circle_y + circle_radius:
if bar2_y < circle_y + circle_radius:
bar2_y += ai_speed
if bar2_y > circle_y - (bar_height - circle_radius):
bar2_y -= ai_speed
            else:
                bar2_y = circle_y + circle_radius  # keep the paddle aligned with the ball (was a no-op comparison)
if bar1_y >= bar_max_y:
bar1_y = bar_max_y
elif bar1_y <= bar_dist_from_edge:
bar1_y = bar_dist_from_edge
if bar2_y >= bar_max_y:
bar2_y = bar_max_y
elif bar2_y <= bar_dist_from_edge:
bar2_y = bar_dist_from_edge
# since i don't know anything about collision, ball hitting bars goes like this.
if circle_x <= bar1_x + bar_dist_from_edge:
if circle_y >= bar1_y - circle_radius and circle_y <= bar1_y + (bar_height - circle_radius):
circle_x = bar_dist_from_edge + bar_width
speed_x = -speed_x
if circle_x >= bar2_x - circle_diameter:
if circle_y >= bar2_y - circle_radius and circle_y <= bar2_y + (bar_height - circle_radius):
circle_x = screen_width - bar_dist_from_edge - bar_width - circle_diameter
speed_x = -speed_x
if circle_x < -circle_radius:
bar2_score += 1
circle_x, circle_y = (screen_width + circle_diameter) / 2., circle_start_y
            bar1_y, bar2_y = bar_start_y, bar_start_y
elif circle_x > screen_width - circle_diameter:
bar1_score += 1
circle_x, circle_y = circle_start_x, circle_start_y
bar1_y, bar2_y = bar_start_y, bar_start_y
if circle_y <= bar_dist_from_edge:
speed_y = -speed_y
circle_y = bar_dist_from_edge
elif circle_y >= screen_height - circle_diameter - circle_radius:
speed_y = -speed_y
circle_y = screen_height - circle_diameter - circle_radius
pygame.display.update()
pygame.quit()
if __name__ == '__main__':
run()
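# Illustrative note (assumption, not in the original file): every bar/ball
# dimension above is derived from screen_width/screen_height, so a larger
# playable window can be requested directly, e.g.:
#
#     run(screen_width=640., screen_height=480.)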
| mit |
bgribble/mfp | mfp/gui/xyplot/scatterplot.py | 1 | 7839 | #! /usr/bin/env python
'''
scatterplot.py
Specialization of XYPlot for showing sets of discrete datapoints
Copyright (c) 2012 Bill Gribble <[email protected]>
'''
import math
from .mark_style import MarkStyle
from .xyplot import XYPlot
from mfp import log
class ScatterPlot (XYPlot):
def __init__(self, element, width, height):
# data points
self.points = {}
self.points_by_tile = {}
# roll-mode scroll speeds
self.x_scroll = 0
self.y_scroll = 0
XYPlot.__init__(self, element, width, height)
def draw_field_cb(self, texture, ctxt, px_min, px_max):
def stroke_to(styler, curve, px, ptnum, delta):
points = self.points.get(curve)
dst_ptnum = ptnum + delta
if dst_ptnum < 0 or dst_ptnum > points[-1][0]:
return
dst_num, dst_pt = points[dst_ptnum]
dst_px = self.pt2px(dst_pt)
dst_px[0] -= px_min[0]
dst_px[1] -= px_min[1]
styler.stroke(ctxt, dst_px, px)
# if the viewport is animated (viewport_scroll not 0)
# the position of the field may have changed.
field_vp = self.plot.get_viewport_origin()
field_vp_pos = self.px2pt(field_vp)
field_w = self.x_max - self.x_min
field_h = self.y_max - self.y_min
if self.x_min != field_vp_pos[0]:
self.x_min = field_vp_pos[0]
self.x_max = self.x_min + field_w
self._recalc_x_scale()
if self.y_max != field_vp_pos[1]:
self.y_max = field_vp_pos[1]
self.y_min = self.y_max - field_h
self._recalc_y_scale()
for curve in self.points:
curve = int(curve)
styler = self.style.get(curve)
if styler is None:
log.warning("[scatterplot]: no style for curve", curve)
styler = self.style[curve] = MarkStyle()
tile_id = self.plot.tile_reverse.get(texture)
if tile_id is None:
return
points = self.points_by_tile[curve].get(tile_id)
if points is not None:
for ptnum, p in points:
pc = self.pt2px(p)
pc[0] -= px_min[0]
pc[1] -= px_min[1]
styler.mark(ctxt, pc)
if styler.stroke_style:
stroke_to(styler, curve, pc, ptnum, -1)
if styler.stroke_style:
ptnum, p = points[-1]
pc = self.pt2px(p)
pc[0] -= px_min[0]
pc[1] -= px_min[1]
stroke_to(styler, curve, pc, ptnum, 1)
def set_scroll_rate(self, vx, vy):
px = self.pt2px((vx, vy))
self.x_axis.set_viewport_scroll(px[0], 0)
self.y_axis.set_viewport_scroll(0, px[1])
self.plot.set_viewport_scroll(px[0], px[1])
def append(self, point, curve=0):
curve = int(curve)
pts = self.points.setdefault(curve, [])
ptnum = len(pts)
pts.append([ptnum, point])
tiles = self.index_point(point, curve, ptnum)
for tile_id in tiles:
tex = self.plot.tile_by_pos.get(tile_id)
if tex is not None:
tex.invalidate()
def index_point(self, point, curve, ptnum):
tile_size = self.plot.tile_size
def tile_id(point):
return (int(math.floor(point[0] / tile_size) * tile_size),
int(math.floor(point[1] / tile_size) * tile_size))
px = self.pt2px(point)
if px is None:
# point is not legal, usually on log charts
return []
curve = int(curve)
tiles = []
pts = self.points.setdefault(curve, {})
bytile = self.points_by_tile.setdefault(curve, {})
style = self.style.get(curve)
if style is None:
style = self.style[curve] = MarkStyle()
markradius = style.size
for dx in [-markradius, markradius]:
for dy in [-markradius, markradius]:
x = px[0] + dx
y = px[1] + dy
tid = tile_id((x, y))
if tid not in tiles:
tiles.append(tid)
if style.stroke_style and ptnum > 0:
prev_pt = pts[ptnum - 1][1]
prev_px = self.pt2px(prev_pt)
if prev_px is not None:
tid = tile_id(prev_px)
if tid not in tiles:
tiles.append(tid)
for tile_id in tiles:
pts = bytile.setdefault(tile_id, [])
pts.append([ptnum, point])
return tiles
def reindex(self):
self.points_by_tile = {}
for curve, curvepoints in self.points.items():
for ptnum, point in curvepoints:
self.index_point(point, curve, ptnum)
def clear(self, curve=None):
if curve is None:
self.points = {}
self.points_by_tile = {}
elif curve is not None:
if curve in self.points:
del self.points[curve]
self.reindex()
self.plot.clear()
def set_style(self, style):
for inlet, istyle in style.items():
inlet = int(inlet)
marker = self.style.setdefault(inlet, MarkStyle())
for k, v in istyle.items():
if k == "size":
marker.size = float(v)
elif k == "color":
marker.set_color(v)
elif k == "shape":
marker.shape = str(v)
elif k == "stroke":
marker.stroke_style = str(v)
def save_style(self):
sd = {}
for inlet, style in self.style.items():
props = sd.setdefault(str(inlet), {})
props["size"] = style.size
props["color"] = style.colorspec
props["shape"] = style.shape
props["stroke"] = style.stroke_style
return sd
def configure(self, params):
modes = dict(LINEAR=0, LOG=1, LOG_10=1, LOG_2=2)
s = params.get("plot_style")
if s:
self.set_style(s)
need_vp = False
x = params.get("x_axis")
if x:
mode = modes.get(x.upper())
if mode != self.x_axis_mode:
self.x_axis_mode = mode
self._recalc_x_scale()
xax = self.pt2px((self.x_min, self.y_min))
self.x_axis.set_viewport_origin(xax[0], 0, True)
need_vp = True
y = params.get("y_axis")
if y:
mode = modes.get(y.upper())
if mode != self.y_axis_mode:
self.y_axis_mode = mode
self._recalc_y_scale()
yax = self.pt2px((self.x_min, self.y_max))
self.y_axis.set_viewport_origin(0, yax[1], True)
need_vp = True
if need_vp:
origin = self.pt2px((self.x_min, self.y_max))
self.set_field_origin(origin[0], origin[1], True)
def set_field_origin(self, x_orig, y_orig, redraw):
self.plot.set_viewport_origin(x_orig, y_orig, redraw)
def command(self, action, data):
if action == "add":
for c in data:
for p in data[c]:
self.append(p, c)
return True
elif action == "roll":
self.set_bounds(None, None, data, None)
self.set_scroll_rate(1.0, 0)
return True
elif action == "stop":
self.set_scroll_rate(0.0, 0.0)
return True
elif action == "reset":
self.set_bounds(None, None, data, None)
return True
return False
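# --- Illustrative usage sketch (not part of the original file) ---------------
# command() dispatches on an action string; the payload shapes below are
# inferred from the handlers above (curve index -> list of (x, y) points for
# "add", a new x bound for "roll"/"reset"). The concrete values are made up.
def _example_commands(scatter):
    scatter.command("add", {0: [(0.0, 1.0), (0.5, 0.7)], 1: [(0.0, 0.2)]})
    scatter.command("roll", 12.5)   # begin roll-mode scrolling of the viewport
    scatter.command("stop", None)   # freeze the viewport
    scatter.command("reset", 0.0)   # reset bounds without scrolling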
| gpl-2.0 |
mtzirkel/skiff | quiz/multichoice/forms.py | 2 | 1220 | from django.forms import ModelForm, Textarea, NumberInput, SelectMultiple
from django import forms
from .models import MCQuestion, MCAnswer
class MCQuestionForm(ModelForm):
class Meta:
model = MCQuestion
        fields = ['question_text',
                  'point_value',
                  'inquiz',
                  'answer_a',
                  'a_is_correct',
                  'answer_b',
                  'b_is_correct',
                  'answer_c',
                  'c_is_correct',
                  'answer_d',
                  'd_is_correct']
widgets = {
'question_text': Textarea(attrs={'cols': 70, 'rows': 10}),
'point_value': NumberInput(attrs={'cols': 3, 'row': 1}),
'answer_a': Textarea(attrs={'cols': 70, 'rows': 2}),
'answer_b': Textarea(attrs={'cols': 70, 'rows': 2}),
'answer_c': Textarea(attrs={'cols': 70, 'rows': 2}),
'answer_d': Textarea(attrs={'cols': 70, 'rows': 2}),
}
class MCAnswerForm(ModelForm):
class Meta:
model = MCAnswer
        fields = ['student_choices']
        widgets = {'student_choices': SelectMultiple()}
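# --- Illustrative usage sketch (not part of the original file) ---------------
# Typical bound/unbound use of MCQuestionForm in a view; the template name is
# hypothetical.
def _example_question_view(request):
    from django.shortcuts import render
    if request.method == 'POST':
        form = MCQuestionForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = MCQuestionForm()
    return render(request, 'multichoice/question_form.html', {'form': form})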
| mit |
ikool/metact06-djan | lib/Crypto/Signature/__init__.py | 126 | 1202 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Digital signature protocols
A collection of standardized protocols to carry out digital signatures.
:undocumented: __revision__, __package__
"""
__all__ = [ 'PKCS1_v1_5', 'PKCS1_PSS' ]
__revision__ = "$Id$"
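# --- Illustrative usage sketch (not part of the original file) ---------------
# Signing and verifying with the PKCS#1 v1.5 scheme exported above; the key
# size and message are arbitrary example values.
def _example_sign_and_verify():
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA
    from Crypto.PublicKey import RSA
    key = RSA.generate(2048)
    h = SHA.new(b"message to sign")
    signature = PKCS1_v1_5.new(key).sign(h)
    # verify() returns True when the signature matches the public key
    return PKCS1_v1_5.new(key.publickey()).verify(h, signature)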
| bsd-3-clause |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/django/contrib/admin/views/decorators.py | 45 | 3276 | import base64
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django import http, template
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy, ugettext as _
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. Note that both fields are case-sensitive.")
LOGIN_FORM_KEY = 'this_is_the_login_form'
def _display_login_form(request, error_message=''):
request.session.set_test_cookie()
return render_to_response('admin/login.html', {
'title': _('Log in'),
'app_path': request.get_full_path(),
'error_message': error_message
}, context_instance=template.RequestContext(request))
def staff_member_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
"""
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_staff:
# The user is valid. Continue to the admin page.
return view_func(request, *args, **kwargs)
assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
# If this isn't already the login page, display it.
if LOGIN_FORM_KEY not in request.POST:
if request.POST:
message = _("Please log in again, because your session has expired.")
else:
message = ""
return _display_login_form(request, message)
# Check that the user accepts cookies.
if not request.session.test_cookie_worked():
message = _("Looks like your browser isn't configured to accept cookies. Please enable cookies, reload this page, and try again.")
return _display_login_form(request, message)
else:
request.session.delete_test_cookie()
# Check the password.
username = request.POST.get('username', None)
password = request.POST.get('password', None)
user = authenticate(username=username, password=password)
if user is None:
message = ERROR_MESSAGE
if '@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
users = list(User.objects.filter(email=username))
if len(users) == 1 and users[0].check_password(password):
message = _("Your e-mail address is not your username. Try '%s' instead.") % users[0].username
return _display_login_form(request, message)
# The user data is correct; log in the user in and continue.
else:
if user.is_active and user.is_staff:
login(request, user)
return http.HttpResponseRedirect(request.get_full_path())
else:
return _display_login_form(request, ERROR_MESSAGE)
return wraps(view_func)(_checklogin)
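# --- Illustrative usage sketch (not part of the original file) ---------------
# The decorator wraps an admin-only view; the view body is hypothetical.
@staff_member_required
def _example_admin_only_view(request):
    return http.HttpResponse("visible to active staff members only")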
| apache-2.0 |
michael-yin/scrapy | scrapy/tests/test_log.py | 10 | 4667 | from cStringIO import StringIO
from twisted.python import log as txlog, failure
from twisted.trial import unittest
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.settings import default_settings
class LogTest(unittest.TestCase):
def test_get_log_level(self):
default_log_level = getattr(log, default_settings.LOG_LEVEL)
self.assertEqual(log._get_log_level('WARNING'), log.WARNING)
self.assertEqual(log._get_log_level(log.WARNING), log.WARNING)
self.assertRaises(ValueError, log._get_log_level, object())
class ScrapyFileLogObserverTest(unittest.TestCase):
level = log.INFO
encoding = 'utf-8'
def setUp(self):
self.f = StringIO()
self.sflo = log.ScrapyFileLogObserver(self.f, self.level, self.encoding)
self.sflo.start()
def tearDown(self):
self.flushLoggedErrors()
self.sflo.stop()
def logged(self):
return self.f.getvalue().strip()[25:]
def first_log_line(self):
logged = self.logged()
return logged.splitlines()[0] if logged else ''
def test_msg_basic(self):
log.msg("Hello")
self.assertEqual(self.logged(), "[scrapy] INFO: Hello")
def test_msg_spider(self):
spider = BaseSpider("myspider")
log.msg("Hello", spider=spider)
self.assertEqual(self.logged(), "[myspider] INFO: Hello")
def test_msg_level1(self):
log.msg("Hello", level=log.WARNING)
self.assertEqual(self.logged(), "[scrapy] WARNING: Hello")
def test_msg_level2(self):
log.msg("Hello", log.WARNING)
self.assertEqual(self.logged(), "[scrapy] WARNING: Hello")
def test_msg_wrong_level(self):
log.msg("Hello", level=9999)
self.assertEqual(self.logged(), "[scrapy] NOLEVEL: Hello")
def test_msg_level_spider(self):
spider = BaseSpider("myspider")
log.msg("Hello", spider=spider, level=log.WARNING)
self.assertEqual(self.logged(), "[myspider] WARNING: Hello")
def test_msg_encoding(self):
log.msg(u"Price: \xa3100")
self.assertEqual(self.logged(), "[scrapy] INFO: Price: \xc2\xa3100")
def test_msg_ignore_level(self):
log.msg("Hello", level=log.DEBUG)
log.msg("World", level=log.INFO)
self.assertEqual(self.logged(), "[scrapy] INFO: World")
def test_msg_ignore_system(self):
txlog.msg("Hello")
self.failIf(self.logged())
def test_msg_ignore_system_err(self):
txlog.msg("Hello")
self.failIf(self.logged())
def test_err_noargs(self):
try:
a = 1/0
except:
log.err()
self.failUnless('Traceback' in self.logged())
self.failUnless('ZeroDivisionError' in self.logged())
def test_err_why(self):
log.err(TypeError("bad type"), "Wrong type")
self.assertEqual(self.first_log_line(), "[scrapy] ERROR: Wrong type")
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
def test_error_outside_scrapy(self):
"""Scrapy logger should still print outside errors"""
txlog.err(TypeError("bad type"), "Wrong type")
self.assertEqual(self.first_log_line(), "[-] ERROR: Wrong type")
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
# this test fails in twisted trial observer, not in scrapy observer
# def test_err_why_encoding(self):
# log.err(TypeError("bad type"), u"\xa3")
# self.assertEqual(self.first_log_line(), "[scrapy] ERROR: \xc2\xa3")
def test_err_exc(self):
log.err(TypeError("bad type"))
self.failUnless('Unhandled Error' in self.logged())
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
def test_err_failure(self):
log.err(failure.Failure(TypeError("bad type")))
self.failUnless('Unhandled Error' in self.logged())
self.failUnless('TypeError' in self.logged())
self.failUnless('bad type' in self.logged())
class Latin1ScrapyFileLogObserverTest(ScrapyFileLogObserverTest):
encoding = 'latin-1'
def test_msg_encoding(self):
log.msg(u"Price: \xa3100")
logged = self.f.getvalue().strip()[25:]
self.assertEqual(self.logged(), "[scrapy] INFO: Price: \xa3100")
# this test fails in twisted trial observer, not in scrapy observer
# def test_err_why_encoding(self):
# log.err(TypeError("bad type"), u"\xa3")
# self.assertEqual(self.first_log_line(), "[scrapy] ERROR: \xa3")
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
rriggio/empower-runtime | empower/apps/wifimobilitymanager/wifimobilitymanager.py | 1 | 1790 | #!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A simple Wi-Fi mobility manager."""
from empower.managers.ranmanager.lvapp.wifiapp import EWiFiApp
from empower.core.app import EVERY
class WiFiMobilityManager(EWiFiApp):
"""A simple Wi-Fi mobility manager.
This app will peridodically handover every LVAP in the network to the
interface with the highest RSSI.
Parameters:
service_id: the application id as an UUID (mandatory)
project_id: the project id as an UUID (mandatory)
every: the loop period in ms (optional, default 2000ms)
Example:
POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
{
"name": "empower.apps.wifimobilitymanager.wifimobilitymanager",
"params": {
"every": 2000
}
}
"""
def loop(self):
"""Periodic job."""
for lvap in self.lvaps.values():
lvap.blocks = self.blocks().sort_by_rssi(lvap.addr).first()
def launch(context, service_id, every=EVERY):
""" Initialize the module. """
return WiFiMobilityManager(context=context,
service_id=service_id,
every=every)
| apache-2.0 |
twoh/leevee | env/Lib/encodings/zlib_codec.py | 58 | 3048 | """ Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
_is_text_encoding=False,
)
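# --- Illustrative usage sketch (not part of the original file) ---------------
# Because this codec maps bytes to bytes, it is driven through codecs.encode /
# codecs.decode rather than text .encode()/.decode().
def _example_roundtrip():
    import codecs
    payload = b"hello " * 100
    packed = codecs.encode(payload, "zlib_codec")
    return codecs.decode(packed, "zlib_codec") == payload  # True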
| mit |
HardTacos/techmodnotify-reddit-bot | techmodnotify.py | 1 | 6960 | #!/usr/bin/env python2.7
# =============================================================================
# IMPORTS
# =============================================================================
import praw
import MySQLdb
import ConfigParser
import time
import parsedatetime.parsedatetime as pdt
import pprint
import logging
from datetime import datetime, timedelta
from requests.exceptions import HTTPError, ConnectionError, Timeout
from praw.errors import ExceptionList, APIException, InvalidCaptcha, InvalidUser, RateLimitExceeded
from socket import timeout
from pytz import timezone
from multiprocessing import Process
# =============================================================================
# GLOBALS
# =============================================================================
# Reads the config file
config = ConfigParser.ConfigParser()
config.read("techmodnotify.cfg")
# Reddit info
user_agent = ("TechModNotify bot by /u/zathegfx")
reddit = praw.Reddit(user_agent = user_agent)
USER = config.get("Reddit", "username")
PASS = config.get("Reddit", "password")
DB_HOST = config.get("SQL", "host")
DB_NAME = config.get("SQL", "db")
DB_USER = config.get("SQL", "user")
DB_PASS = config.get("SQL", "pass")
DB_TABLE = config.get("SQL", "table")
# =============================================================================
# Functions
# =============================================================================
def save_to_db(db, submissionID, permalink, author):
"""
Saves the permalink submission, the time, and the author to the DB
"""
cursor = db.cursor()
currentTime1 = datetime.now(timezone('UTC'))
currentTime = format(currentTime1, '%Y-%m-%d %H:%M:%S')
replyTime1 = currentTime1 + timedelta(0,300)
replyTime = format(replyTime1, '%Y-%m-%d %H:%M:%S')
cmd = "SELECT * FROM " + DB_TABLE + " WHERE submissionID = %s"
cursor.execute(cmd, [submissionID])
results = cursor.fetchall()
if (len(results) > 0):
return True;
else:
cmd = "INSERT INTO " + DB_TABLE + " (submissionID, currentTime, replyTime, permalink, author) VALUES (%s, %s, %s, %s, %s)"
cursor.execute(cmd, [submissionID, currentTime, replyTime, permalink, author])
print currentTime + ' - Inserted new record into table: ' + submissionID
db.commit()
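# --- Illustrative schema sketch (assumption, not part of the original file) --
# save_to_db()/search_db() only rely on the columns referenced in the queries;
# a minimal compatible table (the real table name comes from the config file,
# and the column types are assumed) would be:
#
#   CREATE TABLE techmodnotify (
#       id           INT AUTO_INCREMENT PRIMARY KEY,
#       submissionID VARCHAR(16),
#       currentTime  DATETIME,
#       replyTime    DATETIME,
#       permalink    VARCHAR(255),
#       author       VARCHAR(64)
#   );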
def search_db():
"""
    Search the database for any records whose reply window (five minutes
    after being recorded) has passed
"""
while True:
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME )
cursor = db.cursor()
currentTime1 = datetime.now(timezone('UTC'))
currentTime = format(currentTime1, '%Y-%m-%d %H:%M:%S')
cmd = "SELECT * FROM " + DB_TABLE + " WHERE replyTime < %s"
cursor.execute(cmd, [currentTime])
results = cursor.fetchall()
alreadySent = []
if ( len(results) > 0 ):
for row in results:
if row[0] not in alreadySent:
submission = reddit.get_submission(submission_id=row[1])
hasFlair = submission.link_flair_css_class
k = str(hasFlair)
if (k == "None"):
flagDelete = False
flagDelete = new_reply(row[4], row[5])
if flagDelete:
cmd = "DELETE FROM " + DB_TABLE + " WHERE id = %s"
cursor.execute(cmd, [row[0]])
db.commit()
print currentTime + ' - No flair detected - send message - deleting record - ' + row[1]
else:
cmd = "DELETE FROM " + DB_TABLE + " WHERE id = %s"
cursor.execute(cmd, [row[0]])
db.commit()
print currentTime + ' - Flair deteced - deleting record - ' + row[1]
alreadySent.append(row[0])
time.sleep(5)
def new_reply(permalink, author):
reddit.login(USER, PASS)
try:
reddit.send_message(author, 'Message from /r/technology',
"Hello " + author + ","
"\n\nWe appreciate your contribution to /r/technology! We noticed "
"that you haven't flaired your [post](" + permalink + ") yet. In order to keep this sub "
"organized, every post is required to be flaired with respect to "
"the articles main focus. This allows the readers of /r/technology "
"to easily find the articles that most interest them. "
"\n\n If you could take a moment out of your time to properly flair "
"your post, we would gladly apprieciate it. Instruction on properly "
"flairing your post can be found [here](http://www.reddit.com/r/technology/wiki/flair). "
"\n\n Thank you!"
"\n\n Techonology Mod Team"
"\n\n_____\n\n"
"\n\n *This is a bot - if you have any questions or need to report an issue regarding "
"this bot, please [message the mods](https://www.reddit.com/message/compose?to=%2Fr%2Ftechnology) immediately*"
"\n\n**Your Post:** " + permalink + "")
print "Message Sent!"
return True
except InvalidUser as err:
print "InvalidUser", err
return True
except APIException as err:
print "APIException", err
return False
except IndexError as err:
print "IndexError", err
return False
except (HTTPError, ConnectionError, Timeout, timeout) as err:
print "HTTPError", err
return False
except RateLimitExceeded as err:
print "RateLimitExceeded", err
time.sleep(10)
def main():
reddit.login(USER, PASS)
db = MySQLdb.connect(DB_HOST, DB_USER, DB_PASS, DB_NAME )
print "start"
while True:
try:
for submission in praw.helpers.submission_stream(reddit, 'technology', limit=5, verbosity=0):
submissionID = submission.id
author = submission.author
permalink = submission.permalink
save_to_db(db, submissionID, permalink, author)
except Exception as err:
print 'There was an error in main(): '
print err
# =============================================================================
# RUNNER
# =============================================================================
if __name__ == '__main__':
Process(target=main).start()
Process(target=search_db).start()
| mit |
ShaguptaS/moviepy | moviepy/video/compositing/CompositeVideoClip.py | 1 | 4062 | import numpy as np
from moviepy.video.VideoClip import VideoClip, ColorClip
from moviepy.audio.AudioClip import CompositeAudioClip
# CompositeVideoClip
class CompositeVideoClip(VideoClip):
"""
A VideoClip made of other videoclips displayed together. This is the
base class for most compositions.
:param size: The size (height x width) of the final clip.
:param clips: A list of videoclips. Each clip of the list will
be displayed below the clips appearing after it in the list.
For each clip:
- The attribute ``pos`` determines where the clip is placed.
See ``VideoClip.set_pos``
- The mask of the clip determines which parts are visible.
Finally, if all the clips in the list have their ``duration``
attribute set, then the duration of the composite video clip
is computed automatically
:param transparent: if False, the clips are overlaid on a surface
of the color `bg_color`. If True, the clips are overlaid on
a transparent surface, so that all pixels that are transparent
for all clips will be transparent in the composite clip. More
precisely, the mask of the composite clip is then the composite
of the masks of the different clips. Only use `transparent=True`
when you intend to use your composite clip as part of another
composite clip and you care about its transparency.
"""
def __init__(self, clips, size=None, bg_color=None, transparent=False,
ismask=False):
if size is None:
size = clips[0].size
if bg_color is None:
bg_color = 0.0 if ismask else (0, 0, 0)
VideoClip.__init__(self)
self.size = size
self.ismask = ismask
self.clips = clips
self.transparent = transparent
self.bg_color = bg_color
self.bg = ColorClip(size, col=self.bg_color).get_frame(0)
# compute duration
ends = [c.end for c in self.clips]
if not any([(e is None) for e in ends]):
self.duration = max(ends)
# compute audio
audioclips = [v.audio for v in self.clips if v.audio != None]
if len(audioclips) > 0:
self.audio = CompositeAudioClip(audioclips)
# compute mask
if transparent:
maskclips = [c.mask.set_pos(c.pos) for c in self.clips]
self.mask = CompositeVideoClip(maskclips,self.size,
transparent=False, ismask=True)
def gf(t):
""" The clips playing at time `t` are blitted over one
another. """
f = self.bg
for c in self.playing_clips(t):
f = c.blit_on(f, t)
return f
self.get_frame = gf
def playing_clips(self, t=0):
""" Returns a list of the clips in the composite clips that are
actually playing at the given time `t`. """
return [c for c in self.clips if c.is_playing(t)]
def clips_array(array, rows_widths=None, cols_widths=None,
                transparent=True, bg_color=(0, 0, 0)):
    """ Composes a 2D array of clips into a single clip laid out on a grid.
    rows_widths are the heights of the rows and cols_widths the widths of
    the columns; both default to the largest clip found in that row/column.
    """
    array = np.array(array)
    sizes_array = np.array([[c.size for c in line] for line in array])
    if rows_widths is None:
        rows_widths = sizes_array[:, :, 1].max(axis=1)  # tallest clip per row
    if cols_widths is None:
        cols_widths = sizes_array[:, :, 0].max(axis=0)  # widest clip per column
    xx = np.cumsum([0] + list(cols_widths))
    yy = np.cumsum([0] + list(rows_widths))
    for j, (x, cw) in enumerate(zip(xx, cols_widths)):
        for i, (y, rw) in enumerate(zip(yy, rows_widths)):
            clip = array[i, j]
            w, h = clip.size
            if (w < cw) or (h < rw):
                # pad undersized clips on a background of the requested color
                clip = CompositeVideoClip([clip], size=(cw, rw),
                                          transparent=transparent,
                                          bg_color=bg_color)
            array[i, j] = clip.set_pos((x, y))
    return CompositeVideoClip(array.flatten().tolist(),
                              size=(xx[-1], yy[-1]))
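# --- Illustrative usage sketch (not part of the original file) ---------------
# Overlaying one clip on another; the file names and offsets are hypothetical.
def _example_overlay():
    from moviepy.video.io.VideoFileClip import VideoFileClip
    background = VideoFileClip("background.mp4")
    logo = VideoFileClip("logo.mp4").set_pos((40, 40))
    return CompositeVideoClip([background, logo], size=background.size)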
| mit |
Vibek/Human_intention | src/skeleton_markers/msg/_recognitionActionFeedback.py | 1 | 2852 | """autogenerated by genpy from skeleton_markers/recognitionActionFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class recognitionActionFeedback(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "skeleton_markers/recognitionActionFeedback"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Define a feedback message
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(recognitionActionFeedback, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
| mit |
eusi/MissionPlanerHM | Lib/site-packages/scipy/signal/setup.py | 51 | 1049 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('signal', parent_package, top_path)
config.add_data_dir('tests')
config.add_extension('sigtools',
sources=['sigtoolsmodule.c', 'sigtools.c',
'firfilter.c','medianfilter.c',
'lfilter.c.src', 'correlate_nd.c.src'],
depends = ['sigtools.h'],
include_dirs=['.']
)
config.add_extension('spectral', sources=['spectral.c'])
config.add_extension('spline',
sources = ['splinemodule.c','S_bspline_util.c','D_bspline_util.c',
'C_bspline_util.c','Z_bspline_util.c','bspline_util.c'],
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| gpl-3.0 |
yfried/ansible | lib/ansible/modules/cloud/rackspace/rax_files_objects.py | 102 | 18489 | #!/usr/bin/python
# (c) 2013, Paul Durivage <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
type: bool
default: 'no'
container:
description:
- The container to use for file object operations.
required: true
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; i.e. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
type: bool
default: 'yes'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment:
- rackspace
- rackspace.openstack
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects:
container: testcont
dest: ~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects:
container: testcont
src: file1
dest: ~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects:
container: testcont
src: file1,file2,file3
dest: ~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects:
container: testcont
method: delete
dest: file1
- name: "Delete several objects in test container"
rax_files_objects:
container: testcont
method: delete
dest: file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects:
container: testcont
method: delete
- name: "Upload all files to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: [email protected]
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects:
container: testcont
method: put
src: ~/Downloads/testcont/file3
expires: 60
- name: "Attempt to get remote object that does not exist"
rax_files_objects:
container: testcont
method: get
src: FileThatDoesNotExist.jpg
dest: ~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects:
container: testcont
method: delete
dest: FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects:
container: testcont
type: meta
dest: file2
- name: "Get metadata on several objects"
rax_files_objects:
container: testcont
type: meta
src: file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects:
container: testcont
type: meta
src: file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects:
container: testcont
type: meta
'''
import os
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _upload_folder(cf, folder, container, ttl=None, headers=None):
""" Uploads a folder to Cloud Files.
"""
total_bytes = 0
for root, dirs, files in os.walk(folder):
for fname in files:
full_path = os.path.join(root, fname)
obj_name = os.path.relpath(full_path, folder)
obj_size = os.path.getsize(full_path)
cf.upload_file(container, full_path,
obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
total_bytes += obj_size
return total_bytes
def upload(module, cf, container, src, dest, meta, expires):
""" Uploads a single object or a folder to Cloud Files Optionally sets an
metadata, TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
if not src:
module.fail_json(msg='src must be specified when uploading')
c = _get_container(module, cf, container)
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if not is_dir and not os.path.isfile(src) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
total_bytes = 0
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
elif is_dir:
try:
total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
try:
cont_obj = c.upload_file(src, ttl=expires, headers=meta)
except Exception as e:
module.fail_json(msg=e.message)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or total_bytes > 0:
EXIT_DICT['changed'] = True
if meta:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
Optionally disable maintaining a directory structure by by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
""" Delete specific objects by proving a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception as e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_changed'] = True
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
""" Removes metadata keys and values specified in meta, if any. Deletes on
all objects specified by src or dest (but not both), if any; otherwise it
deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject as e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception as e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and not typ == 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
if __name__ == '__main__':
main()
| gpl-3.0 |
cledio66/pyglet | contrib/layout/layout/Plex/Actions.py | 32 | 2348 | #=======================================================================
#
# Python Lexical Analyser
#
# Actions for use in token specifications
#
#=======================================================================
class Action:
def same_as(self, other):
return self is other
class Return(Action):
"""
Internal Plex action which causes |value| to
be returned as the value of the associated token
"""
value = None
def __init__(self, value):
self.value = value
def perform(self, token_stream, text):
return self.value
def same_as(self, other):
return isinstance(other, Return) and self.value == other.value
def __repr__(self):
return "Return(%s)" % repr(self.value)
class Call(Action):
"""
Internal Plex action which causes a function to be called.
"""
function = None
def __init__(self, function):
self.function = function
def perform(self, token_stream, text):
return self.function(token_stream, text)
def __repr__(self):
return "Call(%s)" % self.function.__name__
def same_as(self, other):
return isinstance(other, Call) and self.function is other.function
class Begin(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
state_name = None
def __init__(self, state_name):
self.state_name = state_name
def perform(self, token_stream, text):
token_stream.begin(self.state_name)
def __repr__(self):
return "Begin(%s)" % self.state_name
def same_as(self, other):
return isinstance(other, Begin) and self.state_name == other.state_name
class Ignore(Action):
"""
IGNORE is a Plex action which causes its associated token
to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
return None
def __repr__(self):
return "IGNORE"
IGNORE = Ignore()
IGNORE.__doc__ = Ignore.__doc__
class Text(Action):
"""
TEXT is a Plex action which causes the text of a token to
be returned as the value of the token. See the docstring of
Plex.Lexicon for more information.
"""
def perform(self, token_stream, text):
return text
def __repr__(self):
return "TEXT"
TEXT = Text()
TEXT.__doc__ = Text.__doc__
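# --- Illustrative usage sketch (not part of the original file) ---------------
# How these actions are typically combined in a Plex Lexicon (see the Lexicon
# docstring referenced above). The token patterns are arbitrary examples and
# the import assumes the package is exposed as a top-level ``Plex`` package.
def _example_lexicon():
    from Plex import Lexicon, State, Str, Any, AnyBut, Rep1, Range
    return Lexicon([
        (Rep1(Range("azAZ")), TEXT),             # return the matched text
        (Rep1(Any(" \t\n")),  IGNORE),           # skip whitespace
        (Str("'"),            Begin('quote')),   # switch the scanner state
        State('quote', [
            (Str("'"),        Begin('')),        # back to the default state
            (AnyBut("'"),     IGNORE),
        ]),
    ])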
| bsd-3-clause |
rphlypo/parietalretreat | setup_data_path_salma.py | 1 | 6001 | import glob
import os.path
from pandas import DataFrame
import pandas
def get_all_paths(data_set=None, root_dir="/"):
# TODO
# if data_set ... collections.Sequence
# iterate over list
if data_set is None:
data_set = {"hcp", "henson2010faces", "ds105", "ds107"}
list_ = list()
head, tail_ = os.path.split(root_dir)
counter = 0
while tail_:
head, tail_ = os.path.split(head)
counter += 1
if hasattr(data_set, "__iter__"):
df_ = list()
for ds in data_set:
df_.append(get_all_paths(data_set=ds, root_dir=root_dir))
df = pandas.concat(df_, keys=data_set)
elif data_set.startswith("ds") or data_set == "henson2010faces":
base_path = os.path.join(root_dir,
"storage/workspace/brainpedia/preproc/",
data_set)
with open(os.path.join(base_path, "scan_key.txt")) as file_:
TR = file_.readline()[3:-1]
for fun_path in glob.iglob(os.path.join(base_path,
"sub*/model/model*/"
"BOLD/task*/bold.nii.gz")):
head, tail_ = os.path.split(fun_path)
tail = [tail_]
while tail_:
head, tail_ = os.path.split(head)
tail.append(tail_)
tail.reverse()
subj_id = tail[6 + counter][-3:]
model = tail[8 + counter][-3:]
task, run = tail[10 + counter].split("_")
tmp_base = os.path.split(os.path.split(fun_path)[0])[0]
anat = os.path.join(tmp_base,
"anatomy",
"highres{}.nii.gz".format(model[-3:]))
onsets = glob.glob(os.path.join(tmp_base, "onsets",
"task{}_run{}".format(task, run),
"cond*.txt"))
confds = os.path.join(os.path.split(fun_path)[0], "motion.txt")
list_.append({"subj_id": subj_id,
"model": model,
"task": task[-3:],
"run": run[-3:],
"func": fun_path,
"anat": anat,
"confds": confds,
"TR": TR})
if onsets:
list_[-1]["onsets"] = onsets
df = DataFrame(list_)
elif data_set == "hcp":
base_path = os.path.join(root_dir, "storage/data/HCP/Q2/")
for fun_path in glob.iglob(os.path.join(base_path,
"*/MNINonLinear/Results/",
"*/*.nii.gz")):
head, tail = os.path.split(fun_path)
if head[-2:] not in ["LR", "RL"]:
continue
tail = [tail]
while head != "/":
head, t = os.path.split(head)
tail.append(t)
if tail[0][:-7] != tail[1]:
continue
tail.reverse()
subj_id = tail[4 + counter]
task = tail[7 + counter][6:-3]
if tail[7 + counter].startswith("rfMRI"):
run = task[-1]
task = task[:-1]
mode = tail[7 + counter][-2:]
anat = os.path.join(base_path, subj_id, "MNINonLinear/T1w.nii.gz")
confds = os.path.join(os.path.split(fun_path)[0],
"Movement_Regressors.txt")
list_.append({"subj_id": subj_id,
"task": task,
"mode": mode,
"func": fun_path,
"anat": anat,
"confds": confds,
"TR": 0.72})
if tail[8 + counter].startswith("rfMRI"):
list_[-1]["run"] = run
else:
onsets = [onset
for onset in glob.glob(os.path.join(
os.path.split(fun_path)[0], "EVs/*.txt"))
if os.path.split(onset)[1][0] != "S"]
list_[-1]["onsets"] = onsets
df = DataFrame(list_)
return df
if __name__ == "__main__":
from nilearn.input_data import MultiNiftiMasker, NiftiMapsMasker
from joblib import Memory, Parallel, delayed
import joblib
from sklearn.base import clone
import nibabel
root_dir = "/media/Elements/volatile/new/salma"
mem = Memory(cachedir=os.path.join(root_dir,
("storage/workspace/brainpedia"
"/preproc/henson2010faces/dump/")))
print "Loading all paths and variables into memory"
df = get_all_paths(root_dir=root_dir,
data_set=["henson2010faces"])
target_affine_ = nibabel.load(df["func"][0]).get_affine()
target_shape_ = nibabel.load(df["func"][0]).shape[:-1]
print "preparing and running MultiNiftiMasker"
mnm = MultiNiftiMasker(mask_strategy="epi", memory=mem, n_jobs=1,
verbose=10, target_affine=target_affine_,
target_shape=target_shape_)
mask_img = mnm.fit(list(df["func"])).mask_img_
print "preparing and running NiftiMapsMasker"
nmm = NiftiMapsMasker(
maps_img=os.path.join("/usr/share/fsl/data/atlases/HarvardOxford/",
"HarvardOxford-cortl-prob-2mm.nii.gz"),
mask_img=mask_img, detrend=True, smoothing_fwhm=5, standardize=True,
low_pass=None, high_pass=None, memory=mem, verbose=10)
region_ts = [clone(nmm).fit_transform(niimg, n_hv_confounds=5)
for niimg in list(df["func"])]
joblib.dump(region_ts, "/home/storage/workspace/rphlypo/retreat/results/")
region_signals = DataFrame({"region_signals": region_ts}, index=df.index)
df.join(region_signals)
| bsd-2-clause |
sudheesh001/oh-mainline | vendor/packages/twisted/twisted/protocols/pcp.py | 71 | 7090 | # -*- test-case-name: twisted.test.test_pcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Producer-Consumer Proxy.
"""
from zope.interface import implements
from twisted.internet import interfaces
class BasicProducerConsumerProxy:
"""
I can act as a man in the middle between any Producer and Consumer.
@ivar producer: the Producer I subscribe to.
@type producer: L{IProducer<interfaces.IProducer>}
@ivar consumer: the Consumer I publish to.
@type consumer: L{IConsumer<interfaces.IConsumer>}
@ivar paused: As a Producer, am I paused?
@type paused: bool
"""
implements(interfaces.IProducer, interfaces.IConsumer)
consumer = None
producer = None
producerIsStreaming = None
iAmStreaming = True
outstandingPull = False
paused = False
stopped = False
def __init__(self, consumer):
self._buffer = []
if consumer is not None:
self.consumer = consumer
consumer.registerProducer(self, self.iAmStreaming)
# Producer methods:
def pauseProducing(self):
self.paused = True
if self.producer:
self.producer.pauseProducing()
def resumeProducing(self):
self.paused = False
if self._buffer:
# TODO: Check to see if consumer supports writeSeq.
self.consumer.write(''.join(self._buffer))
self._buffer[:] = []
else:
if not self.iAmStreaming:
self.outstandingPull = True
if self.producer is not None:
self.producer.resumeProducing()
def stopProducing(self):
if self.producer is not None:
self.producer.stopProducing()
if self.consumer is not None:
del self.consumer
# Consumer methods:
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
self.consumer.write(data)
self.outstandingPull = False
def finish(self):
if self.consumer is not None:
self.consumer.finish()
self.unregisterProducer()
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerIsStreaming = streaming
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
if self.consumer:
self.consumer.unregisterProducer()
def __repr__(self):
return '<%s@%x around %s>' % (self.__class__, id(self), self.consumer)
class ProducerConsumerProxy(BasicProducerConsumerProxy):
"""ProducerConsumerProxy with a finite buffer.
When my buffer fills up, I have my parent Producer pause until my buffer
has room in it again.
"""
# Copies much from abstract.FileDescriptor
bufferSize = 2**2**2**2
producerPaused = False
unregistered = False
def pauseProducing(self):
# Does *not* call up to ProducerConsumerProxy to relay the pause
# message through to my parent Producer.
self.paused = True
def resumeProducing(self):
self.paused = False
if self._buffer:
data = ''.join(self._buffer)
bytesSent = self._writeSomeData(data)
if bytesSent < len(data):
unsent = data[bytesSent:]
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer[:] = [unsent]
else:
self._buffer[:] = []
else:
bytesSent = 0
if (self.unregistered and bytesSent and not self._buffer and
self.consumer is not None):
self.consumer.unregisterProducer()
if not self.iAmStreaming:
self.outstandingPull = not bytesSent
if self.producer is not None:
bytesBuffered = sum([len(s) for s in self._buffer])
# TODO: You can see here the potential for high and low
# watermarks, where bufferSize would be the high mark when we
# ask the upstream producer to pause, and we wouldn't have
# it resume again until it hit the low mark. Or if producer
# is Pull, maybe we'd like to pull from it as much as necessary
# to keep our buffer full to the low mark, so we're never caught
# without something to send.
if self.producerPaused and (bytesBuffered < self.bufferSize):
# Now that our buffer is empty,
self.producerPaused = False
self.producer.resumeProducing()
elif self.outstandingPull:
# I did not have any data to write in response to a pull,
# so I'd better pull some myself.
self.producer.resumeProducing()
def write(self, data):
if self.paused or (not self.iAmStreaming and not self.outstandingPull):
# We could use that fifo queue here.
self._buffer.append(data)
elif self.consumer is not None:
assert not self._buffer, (
"Writing fresh data to consumer before my buffer is empty!")
# I'm going to use _writeSomeData here so that there is only one
# path to self.consumer.write. But it doesn't actually make sense,
# if I am streaming, for some data to not be all data. But maybe I
# am not streaming, but I am writing here anyway, because there was
# an earlier request for data which was not answered.
bytesSent = self._writeSomeData(data)
self.outstandingPull = False
if not bytesSent == len(data):
assert not self.iAmStreaming, (
"Streaming producer did not write all its data.")
self._buffer.append(data[bytesSent:])
if (self.producer is not None) and self.producerIsStreaming:
bytesBuffered = sum([len(s) for s in self._buffer])
if bytesBuffered >= self.bufferSize:
self.producer.pauseProducing()
self.producerPaused = True
def registerProducer(self, producer, streaming):
self.unregistered = False
BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
if self.producer is not None:
del self.producer
del self.producerIsStreaming
self.unregistered = True
if self.consumer and not self._buffer:
self.consumer.unregisterProducer()
def _writeSomeData(self, data):
"""Write as much of this data as possible.
@returns: The number of bytes written.
"""
if self.consumer is None:
return 0
self.consumer.write(data)
return len(data)
| agpl-3.0 |
hpcugent/easybuild-framework | easybuild/toolchains/gmpolf.py | 2 | 1847 | ##
# Copyright 2013-2019 Ghent University
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for gmpolf compiler toolchain (includes GCC, MPICH2, OpenBLAS, LAPACK, ScaLAPACK and FFTW).
:author: Dmitri Gribenko (National Technical University of Ukraine "KPI") (copy from...)
:author: Bart Verleye (University of Auckland)
"""
from easybuild.toolchains.gmpich import Gmpich
from easybuild.toolchains.golf import Golf
from easybuild.toolchains.fft.fftw import Fftw
from easybuild.toolchains.linalg.openblas import OpenBLAS
from easybuild.toolchains.linalg.scalapack import ScaLAPACK
class Gmpolf(Gmpich, OpenBLAS, ScaLAPACK, Fftw):
"""Compiler toolchain with GCC, MPICH, OpenBLAS, ScaLAPACK and FFTW."""
NAME = 'gmpolf'
SUBTOOLCHAIN = [Gmpich.NAME, Golf.NAME]
| gpl-2.0 |
mahirkukreja/delos3geurkernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
cuongnv23/ansible | lib/ansible/modules/database/postgresql/postgresql_schema.py | 29 | 8233 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_schema
short_description: Add or remove PostgreSQL schema from a remote host
description:
- Add or remove PostgreSQL schema from a remote host.
version_added: "2.3"
options:
name:
description:
- Name of the schema to add or remove.
required: true
default: null
database:
description:
- Name of the database to connect to.
required: false
default: postgres
login_user:
description:
- The username used to authenticate with.
required: false
default: null
login_password:
description:
- The password used to authenticate with.
required: false
default: null
login_host:
description:
- Host running the database.
required: false
default: localhost
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
required: false
default: null
owner:
description:
- Name of the role to set as owner of the schema.
required: false
default: null
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The schema state.
required: false
default: present
choices: [ "present", "absent" ]
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before
using this module.
requirements: [ psycopg2 ]
author: "Flavien Chantelot <[email protected]>"
'''
EXAMPLES = '''
# Create a new schema with name "acme"
- postgresql_schema:
name: acme
# Create a new schema "acme" with a user "bob" who will own it
- postgresql_schema:
name: acme
owner: bob
'''
RETURN = '''
schema:
description: Name of the schema
returned: success, changed
type: string
sample: "acme"
'''
import traceback
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, schema, owner):
query = "ALTER SCHEMA %s OWNER TO %s" % (
pg_quote_identifier(schema, 'schema'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
return True
def get_schema_info(cursor, schema):
query = """
SELECT schema_owner AS owner
FROM information_schema.schemata
WHERE schema_name = %(schema)s
"""
cursor.execute(query, {'schema': schema})
return cursor.fetchone()
def schema_exists(cursor, schema):
query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = %(schema)s"
cursor.execute(query, {'schema': schema})
return cursor.rowcount == 1
def schema_delete(cursor, schema):
if schema_exists(cursor, schema):
query = "DROP SCHEMA %s" % pg_quote_identifier(schema, 'schema')
cursor.execute(query)
return True
else:
return False
def schema_create(cursor, schema, owner):
if not schema_exists(cursor, schema):
query_fragments = ['CREATE SCHEMA %s' % pg_quote_identifier(schema, 'schema')]
if owner:
query_fragments.append('AUTHORIZATION %s' % pg_quote_identifier(owner, 'role'))
query = ' '.join(query_fragments)
cursor.execute(query)
return True
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return set_owner(cursor, schema, owner)
else:
return False
def schema_matches(cursor, schema, owner):
if not schema_exists(cursor, schema):
return False
else:
schema_info = get_schema_info(cursor, schema)
if owner and owner != schema_info['owner']:
return False
else:
return True
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
login_unix_socket=dict(default=""),
port=dict(default="5432"),
schema=dict(required=True, aliases=['name']),
owner=dict(default=""),
database=dict(default="postgres"),
state=dict(default="present", choices=["absent", "present"]),
),
supports_check_mode = True
)
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
schema = module.params["schema"]
owner = module.params["owner"]
state = module.params["state"]
database = module.params["database"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.items()
if k in params_map and v != '' )
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(database=database, **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = not schema_exists(cursor, schema)
elif state == "present":
changed = not schema_matches(cursor, schema, owner)
module.exit_json(changed=changed, schema=schema)
if state == "absent":
try:
changed = schema_delete(cursor, schema)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = schema_create(cursor, schema, owner)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, schema=schema)
if __name__ == '__main__':
main()
| gpl-3.0 |
yatinkumbhare/openstack-nova | nova/api/openstack/compute/plugins/v3/block_device_mapping_v1.py | 37 | 2595 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The legacy block device mappings extension."""
from oslo_utils import strutils
from webob import exc
from nova.api.openstack.compute.schemas.v3 import block_device_mapping_v1 as \
schema_block_device_mapping
from nova.api.openstack import extensions
from nova.i18n import _
ALIAS = "os-block-device-mapping-v1"
ATTRIBUTE_NAME = "block_device_mapping"
ATTRIBUTE_NAME_V2 = "block_device_mapping_v2"
class BlockDeviceMappingV1(extensions.V3APIExtensionBase):
"""Block device mapping boot support."""
name = "BlockDeviceMappingV1"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
return []
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
block_device_mapping_v2 = server_dict.get(ATTRIBUTE_NAME_V2, [])
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
for bdm in block_device_mapping:
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if block_device_mapping:
create_kwargs['block_device_mapping'] = block_device_mapping
# Sets the legacy_bdm flag if we got a legacy block device mapping.
create_kwargs['legacy_bdm'] = True
def get_server_create_schema(self):
return schema_block_device_mapping.server_create
| apache-2.0 |
mysociety/yournextmp-popit | candidates/models/address.py | 2 | 3191 | from __future__ import unicode_literals
from collections import defaultdict
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.six.moves.urllib_parse import urljoin
from django.utils.text import slugify
from django.utils.translation import ugettext as _
from pygeocoder import Geocoder, GeocoderError
import requests
from elections.models import Election
from candidates.election_specific import get_local_area_id
# We use this both for validation of address and the results of the
# lookup, so the MapIt and geocoder lookups should be cached so we
# don't make double requests:
def check_address(address_string, country=None):
tidied_address_before_country = address_string.strip()
if country is None:
tidied_address = tidied_address_before_country
else:
tidied_address = tidied_address_before_country + ', ' + country
try:
location_results = Geocoder.geocode(tidied_address)
except GeocoderError:
message = _("Failed to find a location for '{0}'")
raise ValidationError(message.format(tidied_address_before_country))
lat, lon = location_results[0].coordinates
all_mapit_json = []
queries_to_try = defaultdict(set)
for election in Election.objects.current().prefetch_related('area_types'):
area_types = election.area_types.values_list('name', flat=True)
queries_to_try[election.area_generation].update(area_types)
for area_generation, area_types in queries_to_try.items():
mapit_lookup_url = urljoin(settings.MAPIT_BASE_URL,
'point/4326/{lon},{lat}'.format(
lon=lon,
lat=lat,
))
mapit_lookup_url += '?type=' + ','.join(area_types)
mapit_lookup_url += '&generation={0}'.format(election.area_generation)
mapit_result = requests.get(mapit_lookup_url)
mapit_json = mapit_result.json()
if 'error' in mapit_json:
message = _("The area lookup returned an error: '{error}'")
raise ValidationError(message.format(error=mapit_json['error']))
all_mapit_json += mapit_json.items()
sorted_mapit_results = sorted(
all_mapit_json,
key=lambda t: (t[1]['type'], int(t[0]))
)
if not sorted_mapit_results:
message = _("The address '{0}' appears to be outside the area this site knows about")
raise ValidationError(message.format(tidied_address_before_country))
types_and_areas = [
{
'area_type_code': a[1]['type'],
'area_id': get_local_area_id(a),
}
for a in sorted_mapit_results
]
if settings.AREAS_TO_ALWAYS_RETURN:
types_and_areas += settings.AREAS_TO_ALWAYS_RETURN
types_and_areas_joined = ','.join(
'{area_type_code}-{area_id}'.format(**ta) for ta in types_and_areas
)
area_slugs = [slugify(a[1]['name']) for a in sorted_mapit_results]
ignored_slug = '-'.join(area_slugs)
return {
'type_and_area_ids': types_and_areas_joined,
'ignored_slug': ignored_slug,
}
| agpl-3.0 |
CSysTeam/SecurityPackage | RC4/RC4Test.py | 1 | 1105 | import unittest
from RC4 import RC4
class RC4Test(unittest.TestCase):
def test_RC4TestEnc1(self):
algorithm = RC4()
cipher = algorithm.Encrypt("abcd", "test")
self.assertEqual(cipher, "ÏíDu")
def test_RC4TestDec1(self):
algorithm = RC4()
cipher = algorithm.Decrypt("ÏíDu", "test")
self.assertEqual(cipher, "abcd")
def test_RC4TestEnc2(self):
algorithm = RC4()
cipher = algorithm.Encrypt("0x61626364", "0x74657374")
self.assertTrue(cipher, "0xcfed4475")
def test_RC4TestDec2(self):
algorithm = RC4()
cipher = algorithm.Encrypt("0xcfed4475", "0x74657374")
self.assertTrue(cipher, "0x61626364")
def test_RC4TestEnc(self):
algorithm = RC4()
cipher = algorithm.Encrypt("aaaa", "test")
self.assertEqual(cipher, "ÏîFp")
def test_RC4TestDec(self):
algorithm = RC4()
cipher = algorithm.Decrypt("ÏîFp", "test")
self.assertEqual(cipher, "aaaa")
| gpl-3.0 |
sivu22/nltk-on-gae | GAE/nltk/corpus/util.py | 5 | 2934 | # Natural Language Toolkit: Corpus Reader Utility Functions
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
######################################################################
#{ Lazy Corpus Loader
######################################################################
import re
import nltk
TRY_ZIPFILE_FIRST = False
class LazyCorpusLoader(object):
"""
A proxy object which is used to stand in for a corpus object
before the corpus is loaded. This allows NLTK to create an object
for each corpus, but defer the costs associated with loading those
corpora until the first time that they're actually accessed.
The first time this object is accessed in any way, it will load
the corresponding corpus, and transform itself into that corpus
(by modifying its own ``__class__`` and ``__dict__`` attributes).
If the corpus can not be found, then accessing this object will
raise an exception, displaying installation instructions for the
NLTK data package. Once they've properly installed the data
package (or modified ``nltk.data.path`` to point to its location),
they can then use the corpus object without restarting python.
"""
def __init__(self, name, reader_cls, *args, **kwargs):
from nltk.corpus.reader.api import CorpusReader
assert issubclass(reader_cls, CorpusReader)
self.__name = self.__name__ = name
self.__reader_cls = reader_cls
self.__args = args
self.__kwargs = kwargs
def __load(self):
# Find the corpus root directory.
zip_name = re.sub(r'(([^/]*)(/.*)?)', r'\2.zip/\1/', self.__name)
if TRY_ZIPFILE_FIRST:
try:
root = nltk.data.find('corpora/%s' % zip_name)
except LookupError:
raise
root = nltk.data.find('corpora/%s' % self.__name)
else:
try:
root = nltk.data.find('corpora/%s' % self.__name)
except LookupError as e:
try: root = nltk.data.find('corpora/%s' % zip_name)
except LookupError: raise e
# Load the corpus.
corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
# This is where the magic happens! Transform ourselves into
# the corpus by modifying our own __dict__ and __class__ to
# match that of the corpus.
self.__dict__ = corpus.__dict__
self.__class__ = corpus.__class__
def __getattr__(self, attr):
self.__load()
# This looks circular, but its not, since __load() changes our
# __class__ to something new:
return getattr(self, attr)
def __repr__(self):
return '<%s in %r (not loaded yet)>' % (
self.__reader_cls.__name__, '.../corpora/'+self.__name)
| apache-2.0 |
benjaminjkraft/django | tests/template_tests/test_engine.py | 116 | 1925 | import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
class RenderToStringTest(SimpleTestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_basic_context(self):
self.assertEqual(
self.engine.render_to_string('test_context.html', {'obj': 'test'}),
'obj:test\n',
)
class LoaderTests(SimpleTestCase):
def test_origin(self):
engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
template = engine.get_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
def test_loader_priority(self):
"""
#21460 -- Check that the order of template loader works.
"""
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
def test_cached_loader_priority(self):
"""
Check that the order of template loader works. Refs #21460.
"""
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
| bsd-3-clause |
ekalosak/boto | boto/swf/__init__.py | 145 | 1792 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.ec2.regioninfo import RegionInfo
from boto.regioninfo import get_regions, load_regions
import boto.swf.layer1
REGION_ENDPOINTS = load_regions().get('swf', {})
def regions(**kw_params):
"""
Get all available regions for the Amazon Simple Workflow service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
return get_regions('swf', connection_cls=boto.swf.layer1.Layer1)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
| mit |
MQQiang/kbengine | kbe/src/lib/python/Lib/ctypes/wintypes.py | 197 | 5628 | # The most useful windows datatypes
import ctypes
BYTE = ctypes.c_byte
WORD = ctypes.c_ushort
DWORD = ctypes.c_ulong
#UCHAR = ctypes.c_uchar
CHAR = ctypes.c_char
WCHAR = ctypes.c_wchar
UINT = ctypes.c_uint
INT = ctypes.c_int
DOUBLE = ctypes.c_double
FLOAT = ctypes.c_float
BOOLEAN = BYTE
BOOL = ctypes.c_long
class VARIANT_BOOL(ctypes._SimpleCData):
_type_ = "v"
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.value)
ULONG = ctypes.c_ulong
LONG = ctypes.c_long
USHORT = ctypes.c_ushort
SHORT = ctypes.c_short
# in the windows header files, these are structures.
_LARGE_INTEGER = LARGE_INTEGER = ctypes.c_longlong
_ULARGE_INTEGER = ULARGE_INTEGER = ctypes.c_ulonglong
LPCOLESTR = LPOLESTR = OLESTR = ctypes.c_wchar_p
LPCWSTR = LPWSTR = ctypes.c_wchar_p
LPCSTR = LPSTR = ctypes.c_char_p
LPCVOID = LPVOID = ctypes.c_void_p
# WPARAM is defined as UINT_PTR (unsigned type)
# LPARAM is defined as LONG_PTR (signed type)
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
WPARAM = ctypes.c_ulong
LPARAM = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
WPARAM = ctypes.c_ulonglong
LPARAM = ctypes.c_longlong
ATOM = WORD
LANGID = WORD
COLORREF = DWORD
LGRPID = DWORD
LCTYPE = DWORD
LCID = DWORD
################################################################
# HANDLE types
HANDLE = ctypes.c_void_p # in the header files: void *
HACCEL = HANDLE
HBITMAP = HANDLE
HBRUSH = HANDLE
HCOLORSPACE = HANDLE
HDC = HANDLE
HDESK = HANDLE
HDWP = HANDLE
HENHMETAFILE = HANDLE
HFONT = HANDLE
HGDIOBJ = HANDLE
HGLOBAL = HANDLE
HHOOK = HANDLE
HICON = HANDLE
HINSTANCE = HANDLE
HKEY = HANDLE
HKL = HANDLE
HLOCAL = HANDLE
HMENU = HANDLE
HMETAFILE = HANDLE
HMODULE = HANDLE
HMONITOR = HANDLE
HPALETTE = HANDLE
HPEN = HANDLE
HRGN = HANDLE
HRSRC = HANDLE
HSTR = HANDLE
HTASK = HANDLE
HWINSTA = HANDLE
HWND = HANDLE
SC_HANDLE = HANDLE
SERVICE_STATUS_HANDLE = HANDLE
################################################################
# Some important structure definitions
class RECT(ctypes.Structure):
_fields_ = [("left", LONG),
("top", LONG),
("right", LONG),
("bottom", LONG)]
tagRECT = _RECTL = RECTL = RECT
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT)]
SMALL_RECT = _SMALL_RECT
class _COORD(ctypes.Structure):
_fields_ = [('X', SHORT),
('Y', SHORT)]
class POINT(ctypes.Structure):
_fields_ = [("x", LONG),
("y", LONG)]
tagPOINT = _POINTL = POINTL = POINT
class SIZE(ctypes.Structure):
_fields_ = [("cx", LONG),
("cy", LONG)]
tagSIZE = SIZEL = SIZE
def RGB(red, green, blue):
return red + (green << 8) + (blue << 16)
class FILETIME(ctypes.Structure):
_fields_ = [("dwLowDateTime", DWORD),
("dwHighDateTime", DWORD)]
_FILETIME = FILETIME
class MSG(ctypes.Structure):
_fields_ = [("hWnd", HWND),
("message", UINT),
("wParam", WPARAM),
("lParam", LPARAM),
("time", DWORD),
("pt", POINT)]
tagMSG = MSG
MAX_PATH = 260
class WIN32_FIND_DATAA(ctypes.Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", CHAR * MAX_PATH),
("cAlternateFileName", CHAR * 14)]
class WIN32_FIND_DATAW(ctypes.Structure):
_fields_ = [("dwFileAttributes", DWORD),
("ftCreationTime", FILETIME),
("ftLastAccessTime", FILETIME),
("ftLastWriteTime", FILETIME),
("nFileSizeHigh", DWORD),
("nFileSizeLow", DWORD),
("dwReserved0", DWORD),
("dwReserved1", DWORD),
("cFileName", WCHAR * MAX_PATH),
("cAlternateFileName", WCHAR * 14)]
################################################################
# Pointer types
LPBOOL = PBOOL = ctypes.POINTER(BOOL)
PBOOLEAN = ctypes.POINTER(BOOLEAN)
LPBYTE = PBYTE = ctypes.POINTER(BYTE)
PCHAR = ctypes.POINTER(CHAR)
LPCOLORREF = ctypes.POINTER(COLORREF)
LPDWORD = PDWORD = ctypes.POINTER(DWORD)
LPFILETIME = PFILETIME = ctypes.POINTER(FILETIME)
PFLOAT = ctypes.POINTER(FLOAT)
LPHANDLE = PHANDLE = ctypes.POINTER(HANDLE)
PHKEY = ctypes.POINTER(HKEY)
LPHKL = ctypes.POINTER(HKL)
LPINT = PINT = ctypes.POINTER(INT)
PLARGE_INTEGER = ctypes.POINTER(LARGE_INTEGER)
PLCID = ctypes.POINTER(LCID)
LPLONG = PLONG = ctypes.POINTER(LONG)
LPMSG = PMSG = ctypes.POINTER(MSG)
LPPOINT = PPOINT = ctypes.POINTER(POINT)
PPOINTL = ctypes.POINTER(POINTL)
LPRECT = PRECT = ctypes.POINTER(RECT)
LPRECTL = PRECTL = ctypes.POINTER(RECTL)
LPSC_HANDLE = ctypes.POINTER(SC_HANDLE)
PSHORT = ctypes.POINTER(SHORT)
LPSIZE = PSIZE = ctypes.POINTER(SIZE)
LPSIZEL = PSIZEL = ctypes.POINTER(SIZEL)
PSMALL_RECT = ctypes.POINTER(SMALL_RECT)
LPUINT = PUINT = ctypes.POINTER(UINT)
PULARGE_INTEGER = ctypes.POINTER(ULARGE_INTEGER)
PULONG = ctypes.POINTER(ULONG)
PUSHORT = ctypes.POINTER(USHORT)
PWCHAR = ctypes.POINTER(WCHAR)
LPWIN32_FIND_DATAA = PWIN32_FIND_DATAA = ctypes.POINTER(WIN32_FIND_DATAA)
LPWIN32_FIND_DATAW = PWIN32_FIND_DATAW = ctypes.POINTER(WIN32_FIND_DATAW)
LPWORD = PWORD = ctypes.POINTER(WORD)
| lgpl-3.0 |
lokeshh/lokeshh-stem | test/unit/response/authchallenge.py | 9 | 2148 | """
Unit tests for the stem.response.authchallenge.AuthChallengeResponse class.
"""
import unittest
import stem.response
import stem.response.authchallenge
import stem.socket
from test import mocking
VALID_RESPONSE = '250 AUTHCHALLENGE \
SERVERHASH=B16F72DACD4B5ED1531F3FCC04B593D46A1E30267E636EA7C7F8DD7A2B7BAA05 \
SERVERNONCE=653574272ABBB49395BD1060D642D653CFB7A2FCE6A4955BCFED819703A9998C'
VALID_HASH = b'\xb1or\xda\xcdK^\xd1S\x1f?\xcc\x04\xb5\x93\xd4j\x1e0&~cn\xa7\xc7\xf8\xddz+{\xaa\x05'
VALID_NONCE = b"e5t'*\xbb\xb4\x93\x95\xbd\x10`\xd6B\xd6S\xcf\xb7\xa2\xfc\xe6\xa4\x95[\xcf\xed\x81\x97\x03\xa9\x99\x8c"
INVALID_RESPONSE = '250 AUTHCHALLENGE \
SERVERHASH=FOOBARB16F72DACD4B5ED1531F3FCC04B593D46A1E30267E636EA7C7F8DD7A2B7BAA05 \
SERVERNONCE=FOOBAR653574272ABBB49395BD1060D642D653CFB7A2FCE6A4955BCFED819703A9998C'
class TestAuthChallengeResponse(unittest.TestCase):
def test_valid_response(self):
"""
Parses valid AUTHCHALLENGE responses.
"""
control_message = mocking.get_message(VALID_RESPONSE)
stem.response.convert('AUTHCHALLENGE', control_message)
# now this should be a AuthChallengeResponse (ControlMessage subclass)
self.assertTrue(isinstance(control_message, stem.response.ControlMessage))
self.assertTrue(isinstance(control_message, stem.response.authchallenge.AuthChallengeResponse))
self.assertEqual(VALID_HASH, control_message.server_hash)
self.assertEqual(VALID_NONCE, control_message.server_nonce)
def test_invalid_responses(self):
"""
Tries to parse various malformed responses and checks it they raise
appropriate exceptions.
"""
auth_challenge_comp = VALID_RESPONSE.split()
for index in range(1, len(auth_challenge_comp)):
# Attempts to parse a message without this item. The first item is
# skipped because, without the 250 code, the message won't be
# constructed.
remaining_comp = auth_challenge_comp[:index] + auth_challenge_comp[index + 1:]
control_message = mocking.get_message(' '.join(remaining_comp))
self.assertRaises(stem.ProtocolError, stem.response.convert, 'AUTHCHALLENGE', control_message)
| lgpl-3.0 |
wenyu1001/scrapy | tests/test_downloadermiddleware_decompression.py | 133 | 1851 | from unittest import TestCase, main
from scrapy.http import Response, XmlResponse
from scrapy.downloadermiddlewares.decompression import DecompressionMiddleware
from scrapy.spiders import Spider
from tests import get_testdata
from scrapy.utils.test import assert_samelines
def _test_data(formats):
uncompressed_body = get_testdata('compressed', 'feed-sample1.xml')
test_responses = {}
for format in formats:
body = get_testdata('compressed', 'feed-sample1.' + format)
test_responses[format] = Response('http://foo.com/bar', body=body)
return uncompressed_body, test_responses
class DecompressionMiddlewareTest(TestCase):
test_formats = ['tar', 'xml.bz2', 'xml.gz', 'zip']
uncompressed_body, test_responses = _test_data(test_formats)
def setUp(self):
self.mw = DecompressionMiddleware()
self.spider = Spider('foo')
def test_known_compression_formats(self):
for fmt in self.test_formats:
rsp = self.test_responses[fmt]
new = self.mw.process_response(None, rsp, self.spider)
assert isinstance(new, XmlResponse), \
'Failed %s, response type %s' % (fmt, type(new).__name__)
assert_samelines(self, new.body, self.uncompressed_body, fmt)
def test_plain_response(self):
rsp = Response(url='http://test.com', body=self.uncompressed_body)
new = self.mw.process_response(None, rsp, self.spider)
assert new is rsp
assert_samelines(self, new.body, rsp.body)
def test_empty_response(self):
rsp = Response(url='http://test.com', body=b'')
new = self.mw.process_response(None, rsp, self.spider)
assert new is rsp
assert not rsp.body
assert not new.body
def tearDown(self):
del self.mw
if __name__ == '__main__':
main()
| bsd-3-clause |
embeddedarm/android_external_chromium_org | tools/telemetry/telemetry/page/page_filter_unittest.py | 25 | 4195 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.page import page as page_module
from telemetry.page import page_filter as page_filter_module
from telemetry.page import page_set
class MockUrlFilterOptions(object):
def __init__(self, page_filter, page_filter_exclude):
self.page_filter = page_filter
self.page_filter_exclude = page_filter_exclude
self.page_label_filter = None
self.page_label_filter_exclude = None
class MockLabelFilterOptions(object):
def __init__(self, page_label_filter, page_label_filter_exclude):
self.page_filter = None
self.page_filter_exclude = None
self.page_label_filter = page_label_filter
self.page_label_filter_exclude = page_label_filter_exclude
class PageFilterTest(unittest.TestCase):
def setUp(self):
ps = page_set.PageSet()
self.p1 = page_module.Page(
'file://conformance/textures/tex-sub-image-2d.html',
ps,
{ 'name': 'WebglConformance.conformance_textures_tex_sub_image_2d' })
self.p2 = page_module.Page(
'file://othersuite/textures/tex-sub-image-3d.html',
ps,
{ 'name': 'OtherSuite.textures_tex_sub_image_3d' })
self.p3 = page_module.Page(
'file://othersuite/textures/tex-sub-image-3d.html',
ps,
{ 'name': None })
def testURLPattern(self):
options = MockUrlFilterOptions('conformance/textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('somethingelse', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
def testName(self):
options = MockUrlFilterOptions('somethingelse', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('textures_tex_sub_image', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('WebglConformance', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertFalse(page_filter.IsSelected(self.p2))
options = MockUrlFilterOptions('OtherSuite', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
def testNameNone(self):
options = MockUrlFilterOptions('othersuite/textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p3))
options = MockUrlFilterOptions('conformance/textures', '')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p3))
def testLabelFilters(self):
self.p1.label1 = True
self.p2.label1 = True
self.p3.label1 = False
self.p1.label2 = True
self.p2.label2 = False
self.p3.label2 = True
# Include both labels
options = MockLabelFilterOptions('label1,label2', '')
page_filter = page_filter_module.PageFilter(options)
self.assertTrue(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
self.assertTrue(page_filter.IsSelected(self.p3))
# Exclude takes priority
options = MockLabelFilterOptions('label1', 'label2')
page_filter = page_filter_module.PageFilter(options)
self.assertFalse(page_filter.IsSelected(self.p1))
self.assertTrue(page_filter.IsSelected(self.p2))
self.assertFalse(page_filter.IsSelected(self.p3))
| bsd-3-clause |
bartekjagiello/inteygrate_flaskapp | yowsup/layers/protocol_presence/protocolentities/iq_lastseen_result.py | 61 | 1028 | from yowsup.layers.protocol_iq.protocolentities.iq_result import ResultIqProtocolEntity
from yowsup.structs.protocoltreenode import ProtocolTreeNode
class ResultLastseenIqProtocolEntity(ResultIqProtocolEntity):
def __init__(self, jid, seconds, _id = None):
super(ResultLastseenIqProtocolEntity, self).__init__(_from=jid, _id=_id)
self.setSeconds(seconds)
def setSeconds(self, seconds):
self.seconds = int(seconds)
def getSeconds(self):
return self.seconds
def __str__(self):
out = super(ResultIqProtocolEntity, self).__str__()
out += "Seconds: %s\n" % self.seconds
return out
def toProtocolTreeNode(self):
node = super(ResultLastseenIqProtocolEntity, self).toProtocolTreeNode()
node.addChild(ProtocolTreeNode("query", {"seconds": str(self.seconds)}))
return node
@staticmethod
def fromProtocolTreeNode(node):
        return ResultLastseenIqProtocolEntity(node["from"], node.getChild("query")["seconds"], node["id"])
 | gpl-3.0 |