"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.2.1"
__license__ = """
Copyright 2010-2015 Kurt McKee <[email protected]>
Copyright 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
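# For example, an embedding application might override this at import time
# (hypothetical application name and URL):
#
#   import feedparser
#   feedparser.USER_AGENT = "MyAggregator/1.0 +http://aggregator.example.com/"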
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
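# Both switches are plain module-level globals, so an embedding application
# can toggle them before calling parse(); a minimal sketch (hypothetical
# usage, trusted input assumed):
#
#   import feedparser
#   feedparser.RESOLVE_RELATIVE_URIS = 0
#   feedparser.SANITIZE_HTML = 0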
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
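# Illustrative behavior on Python 3 (hypothetical inputs):
#   _s2bytes('feed')                          -> b'feed'
#   _l2bytes([0x3c, 0x3f, 0x78, 0x6d, 0x6c])  -> b'<?xml'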
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib.request, urllib.parse, urllib.error
import warnings
from html.entities import name2codepoint, codepoint2name, entitydefs
try:
    from io import BytesIO as _StringIO
except ImportError:
    from io import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
    def _xmlescape(data, entities={}):
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
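    # A minimal illustration of this fallback escaper (hypothetical input):
    #   _xmlescape('a < b & c') -> 'a &lt; b &amp; c'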
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content sanitizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
            # Returning a new object in the calling thread's context
            # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
'''
:return: A :class:`FeedParserDict`.
'''
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError("object doesn't have key 'category'")
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in list(link.items()) if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']=='enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']=='license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
'''
:return: A :class:`FeedParserDict`.
'''
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError("object has no attribute '%s'" % key)
def __hash__(self):
return id(self)
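# A minimal sketch of the keymap aliasing above (hypothetical values):
#   d = FeedParserDict()
#   d['description'] = 'text'      # stored under 'summary' via keymap
#   d['summary']                   # -> 'text'
#   d.description                  # -> 'text' (attribute access falls
#                                  #    through to __getitem__)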
_cp1252 = {
128: chr(8364), # euro sign
130: chr(8218), # single low-9 quotation mark
131: chr( 402), # latin small letter f with hook
132: chr(8222), # double low-9 quotation mark
133: chr(8230), # horizontal ellipsis
134: chr(8224), # dagger
135: chr(8225), # double dagger
136: chr( 710), # modifier letter circumflex accent
137: chr(8240), # per mille sign
138: chr( 352), # latin capital letter s with caron
139: chr(8249), # single left-pointing angle quotation mark
140: chr( 338), # latin capital ligature oe
142: chr( 381), # latin capital letter z with caron
145: chr(8216), # left single quotation mark
146: chr(8217), # right single quotation mark
147: chr(8220), # left double quotation mark
148: chr(8221), # right double quotation mark
149: chr(8226), # bullet
150: chr(8211), # en dash
151: chr(8212), # em dash
152: chr( 732), # small tilde
153: chr(8482), # trade mark sign
154: chr( 353), # latin small letter s with caron
155: chr(8250), # single right-pointing angle quotation mark
156: chr( 339), # latin small ligature oe
158: chr( 382), # latin small letter z with caron
159: chr( 376), # latin capital letter y with diaeresis
}
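# Example: cp1252 "smart quotes" that leaked into the 0x80-0x9F range are
# repaired with str.translate:
#   '\x93hi\x94'.translate(_cp1252) -> '\u201chi\u201d' (curly double quotes)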
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
if not isinstance(uri, str):
uri = uri.decode('utf-8', 'ignore')
try:
uri = urllib.parse.urljoin(base, uri)
except ValueError:
uri = ''
if not isinstance(uri, str):
return uri.decode('utf-8', 'ignore')
return uri
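# Example (hypothetical URIs):
#   _urljoin('http://example.com/feed/', '../img/logo.png')
#   -> 'http://example.com/img/logo.png'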
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if not self._matchnamespaces:
for k, v in list(self.namespaces.items()):
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
# georss
self.ingeometry = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
# psc_chapters_flag prevents multiple psc_chapters from being
# captured in a single entry or item. The transition states are
# None -> True -> False. psc_chapter elements will only be
# captured while it is True.
self.psc_chapters_flag = None
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
if not isinstance(v, str):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = list(map(self._normalize_attributes, attrs))
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, str):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') != -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = chr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = chr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
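    # Example: mapContentType('html') -> 'text/html', mapContentType('xhtml')
    # -> 'application/xhtml+xml'; any other value is passed through lowercased.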
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = 'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = 'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0], _xmlescape(t[1], {'"': '&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, str):
pieces[i] = v.decode('utf-8')
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = 'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and not isinstance(output, str):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and isinstance(output, str):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, str):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category or _end_tags or _end_itunes_keywords
if element in ('category', 'tags', 'itunes_keywords'):
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
                    # query variables in urls in link elements are improperly
                    # converted from `?a=1&amp;b=2` to `?a=1&b;=2` as if they're
                    # unhandled character references. fix this special case.
                    output = output.replace('&amp;', '&')
                    output = re.sub(r"&([A-Za-z0-9_]+);", r"&amp;\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
                # fix query variables; see above for the explanation
                output = re.sub(r"&([A-Za-z0-9_]+);", r"&amp;\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if [t for t in re.findall(r'</?(\w+)',s) if t.lower() not in _HTMLSanitizer.acceptable_elements]:
return
# all entities must have been defined as valid HTML entities
if [e for e in re.findall(r'&(\w+);', s) if e not in list(entitydefs.keys())]:
return
return 1
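    # Example: lookslikehtml('<p>one &amp; two</p>') -> 1 (close tag plus a
    # known entity), while plain text like 'one and two' returns None.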
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith('rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%ss' % key, [FeedParserDict()])[-1]
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
                author = author.replace('&lt;&gt;', '')
                author = author.replace('<>', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, detail)
if author:
detail['name'] = author
if email:
detail['email'] = email
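    # Example (hypothetical name and address): given
    # context['author'] == 'Jane Doe (jane@example.com)', this splits it into
    # author_detail == {'name': 'Jane Doe', 'email': 'jane@example.com'}.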
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
self.psc_chapters_flag = None
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_dcterms_valid(self, attrsD):
self.push('validity', 1)
def _end_dcterms_valid(self):
for validity_detail in self.pop('validity').split(';'):
if '=' in validity_detail:
key, value = validity_detail.split('=', 1)
if key == 'start':
self._save('validity_start', value, overwrite=True)
self._save('validity_start_parsed', _parse_date(value), overwrite=True)
elif key == 'end':
self._save('validity_end', value, overwrite=True)
self._save('validity_end_parsed', _parse_date(value), overwrite=True)
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
# geospatial location, or "where", from georss.org
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = 'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = 'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict(term=term, scheme=scheme, label=label)
if value not in tags:
tags.append(value)
def _start_tags(self, attrsD):
# This is a completely-made up element. Its semantics are determined
# only by a single feed that precipitated bug report 392 on Google Code.
# In short, this is junk code.
self.push('tags', 1)
def _end_tags(self):
for term in self.pop('tags').split(','):
self._addTag(term.strip(), None, None)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), 'http://www.itunes.com/', None)
def _end_media_keywords(self):
for term in self.pop('media_keywords').split(','):
if term.strip():
self._addTag(term.strip(), None, None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
if attrsD['rel'] == 'self':
attrsD.setdefault('type', 'application/atom+xml')
else:
attrsD.setdefault('type', 'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', list(attrsD.items()))
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
_start_media_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
_end_media_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = 'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD['url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
        # Convert 'yes' -> True, 'clean' -> False, and any other value -> None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
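    # A sketch of the index arithmetic above: the boolean expression evaluates
    # to 2 when value == 'yes', to True (i.e. 1) when value == 'clean', and to
    # 0 otherwise, and that integer then indexes into (None, False, True).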
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_rating(self, attrsD):
context = self._getContext()
context.setdefault('media_rating', attrsD)
self.push('rating', 1)
def _end_media_rating(self):
rating = self.pop('rating')
if rating is not None and rating.strip():
context = self._getContext()
context['media_rating']['content'] = rating
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
        if credit is not None and credit.strip():
context = self._getContext()
context['media_credit'][-1]['content'] = credit
def _start_media_restriction(self, attrsD):
context = self._getContext()
context.setdefault('media_restriction', attrsD)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
        if restriction is not None and restriction.strip():
context = self._getContext()
context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')]
def _start_media_license(self, attrsD):
context = self._getContext()
context.setdefault('media_license', attrsD)
self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
        if license is not None and license.strip():
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
def _start_psc_chapters(self, attrsD):
if self.psc_chapters_flag is None:
# Transition from None -> True
self.psc_chapters_flag = True
attrsD['chapters'] = []
self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
def _end_psc_chapters(self):
# Transition from True -> False
self.psc_chapters_flag = False
def _start_psc_chapter(self, attrsD):
if self.psc_chapters_flag:
start = self._getAttribute(attrsD, 'start')
attrsD['start_parsed'] = _parse_psc_chapter_start(start)
context = self._getContext()['psc_chapters']
context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in list(self.namespacesInUse.items()):
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in list(attrs.items()):
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, list(attrsD.items()))
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in list(self.namespacesInUse.items()):
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
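    # For illustration (a sketch; feed() passes re.Match objects here via
    # re.sub), _shorttag_replace normalizes XML-style empty tags:
    #   '<br/>'  -> '<br />'        (void element keeps the self-closing form)
    #   '<div/>' -> '<div></div>'   (non-void element expands to a tag pair)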
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.__code__ = sgmllib.SGMLParser.goahead.__code__
def __parse_starttag(self, i):
pass
__parse_starttag.__code__ = sgmllib.SGMLParser.parse_starttag.__code__
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + '_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, str):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = list(dict([(k.lower(), v) for k, v in attrs]).items())
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
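    # e.g. (a sketch): normalize_attrs([('HREF', '/a'), ('Rel', 'ME')])
    # lowercases the keys, lowercases 'rel'/'type' values, and sorts:
    #   -> [('href', '/a'), ('rel', 'me')]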
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, str):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((str(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = ''.join([' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', 'xml').endswith('xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
            data = data.replace('&#x2f;', '/')
            data = data.replace('&#x2F;', '/')
return data
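    # e.g. (a sketch): when contentparams says the type is 'text/html',
    # decodeEntities(None, '&#60;b&#62;bold&#60;/b&#62;') first normalizes the
    # numeric references to '&lt;b&gt;bold&lt;/b&gt;' and, because the type is
    # not XML, then decodes them all the way down to '<b>bold</b>'.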
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('audio', 'src'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('source', 'src'),
('video', 'poster'),
('video', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
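# e.g. (a sketch, assuming sgmllib is available so the resolver runs):
#   _resolveRelativeURIs('<a href="/about">x</a>',
#                        'http://example.com/feed', 'utf-8', 'text/html')
#   -> '<a href="http://example.com/about">x</a>'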
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or '')
if not base:
return rel or ''
if not rel:
try:
scheme = urllib.parse.urlparse(base)[0]
except ValueError:
return ''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return ''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return ''
return uri
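# e.g. (a sketch, with the default ACCEPTABLE_URI_SCHEMES):
#   _makeSafeAbsoluteURI('http://example.com/', 'feed.xml')
#       -> 'http://example.com/feed.xml'
#   _makeSafeAbsoluteURI('http://example.com/', 'javascript:alert(1)')
#       -> ''    (the scheme is not in the whitelist)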
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set([
'annotation',
'annotation-xml',
'maction',
'maligngroup',
'malignmark',
'math',
'menclose',
'merror',
'mfenced',
'mfrac',
'mglyph',
'mi',
'mlabeledtr',
'mlongdiv',
'mmultiscripts',
'mn',
'mo',
'mover',
'mpadded',
'mphantom',
'mprescripts',
'mroot',
'mrow',
'ms',
'mscarries',
'mscarry',
'msgroup',
'msline',
'mspace',
'msqrt',
'msrow',
'mstack',
'mstyle',
'msub',
'msubsup',
'msup',
'mtable',
'mtd',
'mtext',
'mtr',
'munder',
'munderover',
'none',
'semantics',
])
mathml_attributes = set([
'accent',
'accentunder',
'actiontype',
'align',
'alignmentscope',
'altimg',
'altimg-height',
'altimg-valign',
'altimg-width',
'alttext',
'bevelled',
'charalign',
'close',
'columnalign',
'columnlines',
'columnspacing',
'columnspan',
'columnwidth',
'crossout',
'decimalpoint',
'denomalign',
'depth',
'dir',
'display',
'displaystyle',
'edge',
'encoding',
'equalcolumns',
'equalrows',
'fence',
'fontstyle',
'fontweight',
'form',
'frame',
'framespacing',
'groupalign',
'height',
'href',
'id',
'indentalign',
'indentalignfirst',
'indentalignlast',
'indentshift',
'indentshiftfirst',
'indentshiftlast',
'indenttarget',
'infixlinebreakstyle',
'largeop',
'length',
'linebreak',
'linebreakmultchar',
'linebreakstyle',
'lineleading',
'linethickness',
'location',
'longdivstyle',
'lquote',
'lspace',
'mathbackground',
'mathcolor',
'mathsize',
'mathvariant',
'maxsize',
'minlabelspacing',
'minsize',
'movablelimits',
'notation',
'numalign',
'open',
'other',
'overflow',
'position',
'rowalign',
'rowlines',
'rowspacing',
'rowspan',
'rquote',
'rspace',
'scriptlevel',
'scriptminsize',
'scriptsizemultiplier',
'selection',
'separator',
'separators',
'shift',
'side',
'src',
'stackalign',
'stretchy',
'subscriptshift',
'superscriptshift',
'symmetric',
'voffset',
'width',
'xlink:href',
'xlink:show',
'xlink:type',
'xmlns',
'xmlns:xlink',
])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
        if tag not in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
        elif tag not in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if [n_v for n_v in attrs if n_v[0].startswith('xlink:')]:
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == 'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
        if tag not in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
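    # e.g. (a sketch): sanitize_style('color: red; position: fixed')
    # keeps only whitelisted properties -> 'color: red;'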
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
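# e.g. (a sketch, assuming sgmllib is available so the sanitizer runs):
#   _sanitizeHTML('<p onclick="evil()">hi<script>bad()</script></p>',
#                 'utf-8', 'text/html')
#   -> '<p>hi</p>'    (the script element and the onclick attribute are dropped)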
class _FeedURLHandler(urllib.request.HTTPDigestAuthHandler, urllib.request.HTTPRedirectHandler, urllib.request.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib.request.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
        # Check if
        # - the server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urllib.parse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If request_headers is supplied, it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
:return: A :class:`StringIO.StringIO` or :class:`io.BytesIO`.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, str) \
and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.parse.splittype(url_file_stream_or_string)
realhost, rest = urllib.parse.splithost(rest)
if realhost:
user_passwd, realhost = urllib.parse.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, str):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib.request.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, str):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urllib.parse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = ''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urllib.parse.urlunsplit(parts)
else:
return url
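# e.g. (a sketch; the punycode form shown is what Python's 'idna' codec
# produces for this host): _convert_to_idn('http://例え.jp/feed.xml')
#   -> 'http://xn--r8jz45g.jp/feed.xml'
# ASCII-only hosts pass through unchanged.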
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib.request.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, str):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in list(request_headers.items()):
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
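# e.g. (a sketch): the resulting request carries conditional-GET headers,
# roughly (header names as urllib stores them, i.e. capitalized):
#   req = _build_urllib2_request('http://example.com/feed.xml', USER_AGENT,
#                                '"abc123"', None, None, None, {})
#   req.get_header('If-none-match')    -> '"abc123"'
#   req.get_header('Accept-encoding')  -> 'gzip, deflate'  (if gzip and zlib)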
def _parse_psc_chapter_start(start):
FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$'
m = re.compile(FORMAT).match(start)
if m is None:
return None
_, h, m, s, _, ms = m.groups()
h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
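# e.g. (a sketch): _parse_psc_chapter_start('01:02:03.456')
#   -> datetime.timedelta(seconds=3723, microseconds=456000)
# The hour and millisecond groups are optional, so '02:03' parses as well.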
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
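# e.g. (a sketch): applications can plug in extra date formats before parsing:
#   def _parse_date_custom(dateString):
#       ...                # return a 9-tuple in GMT, or None on no match
#   registerDateHandler(_parse_date_custom)
# Because of the insert(0, ...), handlers registered later are tried first.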
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance, is 030104 the date
# 2003-01-04 or 0301-04-01?), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
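# e.g. (a sketch): _parse_date_iso8601('2003-12-31T10:14:55Z') yields a
# 9-tuple for 2003-12-31 10:14:55; compact forms like '20031231' and
# ordinal dates like '2003-365' are recognized as well.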
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = '\ub144' # b3e2 in euc-kr
_korean_month = '\uc6d4' # bff9 in euc-kr
_korean_day = '\uc77c' # c0cf in euc-kr
_korean_am = '\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = '\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
'\u0399\u03b1\u03bd': 'Jan', # c9e1ed in iso-8859-7
'\u03a6\u03b5\u03b2': 'Feb', # d6e5e2 in iso-8859-7
'\u039c\u03ac\u03ce': 'Mar', # ccdcfe in iso-8859-7
'\u039c\u03b1\u03ce': 'Mar', # cce1fe in iso-8859-7
'\u0391\u03c0\u03c1': 'Apr', # c1f0f1 in iso-8859-7
'\u039c\u03ac\u03b9': 'May', # ccdce9 in iso-8859-7
'\u039c\u03b1\u03ca': 'May', # cce1fa in iso-8859-7
'\u039c\u03b1\u03b9': 'May', # cce1e9 in iso-8859-7
'\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7
'\u0399\u03bf\u03bd': 'Jun', # c9efed in iso-8859-7
'\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7
'\u0399\u03bf\u03bb': 'Jul', # c9f9eb in iso-8859-7
'\u0391\u03cd\u03b3': 'Aug', # c1fde3 in iso-8859-7
'\u0391\u03c5\u03b3': 'Aug', # c1f5e3 in iso-8859-7
'\u03a3\u03b5\u03c0': 'Sep', # d3e5f0 in iso-8859-7
'\u039f\u03ba\u03c4': 'Oct', # cfeaf4 in iso-8859-7
'\u039d\u03bf\u03ad': 'Nov', # cdefdd in iso-8859-7
'\u039d\u03bf\u03b5': 'Nov', # cdefe5 in iso-8859-7
'\u0394\u03b5\u03ba': 'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
'\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
'\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
'\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
'\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
'\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
'\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
'\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile('([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
'janu\u00e1r': '01', # e1 in iso-8859-2
    'febru\u00e1r': '02', # e1 in iso-8859-2
'm\u00e1rcius': '03', # e1 in iso-8859-2
'\u00e1prilis': '04', # e1 in iso-8859-2
    'm\u00e1jus': '05', # e1 in iso-8859-2
'j\u00fanius': '06', # fa in iso-8859-2
'j\u00falius': '07', # fa in iso-8859-2
'augusztus': '08',
'szeptember': '09',
'okt\u00f3ber': '10', # f3 in iso-8859-2
'november': '11',
'december': '12',
}
_hungarian_date_format_re = \
re.compile('(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
tzhour = 0
tzmin = 0
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
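# e.g. (a sketch): _parse_date_w3dtf('2003-12-31T10:14:55-08:00')
#   -> a struct_time for 2003-12-31 18:14:55 UTC
# MSSQL-style dates such as '2003-12-31 10:14:55' are accepted as well.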
registerDateHandler(_parse_date_w3dtf)
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on
# Anything 89 or less is interpreted as 2089 or before
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = list(map(int, timeparts))
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
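# e.g. (a sketch): _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 EST')
#   -> a struct_time for 2004-01-02 00:48:21 UTC (EST is UTC-5)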
registerDateHandler(_parse_date_rfc822)
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
"""Parse asctime-style dates.
Converts asctime to RFC822-compatible dates and uses the RFC822 parser
to do the actual parsing.
Supported formats (format is standardized to the first one listed):
* {weekday name} {month name} dd hh:mm:ss {+-tz} yyyy
* {weekday name} {month name} dd hh:mm:ss yyyy
"""
parts = dt.split()
# Insert a GMT timezone, if needed.
if len(parts) == 5:
parts.insert(4, '+0000')
# Exit if there are not six parts.
if len(parts) != 6:
return None
# Reassemble the parts in an RFC822-compatible order and parse them.
return _parse_date_rfc822(' '.join([
parts[0], parts[2], parts[1], parts[5], parts[3], parts[4],
]))
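# e.g. (a sketch): 'Sun Jan  4 16:29:06 2004' is rearranged into
# 'Sun 4 Jan 2004 16:29:06 +0000' and handed to the RFC 822 parser.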
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
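# e.g. (a sketch): _parse_date('Thu, 01 Jan 2004 19:48:21 GMT')[:6]
#   -> (2004, 1, 1, 19, 48, 21)
# Handlers are tried in reverse registration order; the first well-formed
# 9-tuple wins.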
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = ''
xml_encoding = ''
rfc3023_encoding = ''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = 'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = 'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = 'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = 'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = 'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = 'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = 'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = 'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
'u16', 'utf-16', 'utf16', 'utf_16',
'u32', 'utf-32', 'utf32', 'utf_32',
'iso-10646-ucs-2', 'iso-10646-ucs-4',
'csucs4', 'csunicode', 'ucs-2', 'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, str):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd',
'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and
http_content_type.endswith('+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/') and
http_content_type.endswith('+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or 'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or 'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == 'gb2312':
rfc3023_encoding = 'gb18030'
if xml_encoding.lower() == 'gb2312':
xml_encoding = 'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
lazy_chardet_encoding = None
tried_encodings = []
if chardet:
def lazy_chardet_encoding():
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if not isinstance(chardet_encoding, str):
chardet_encoding = str(chardet_encoding, 'ascii', 'ignore')
return chardet_encoding
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
lazy_chardet_encoding, 'utf-8', 'windows-1252', 'iso-8859-2'):
if callable(proposed_encoding):
proposed_encoding = proposed_encoding()
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + '\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = ''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
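# Illustrative sketch of the precedence implemented above (hypothetical
# input, not part of the library's test suite):
#
# >>> headers = {'content-type': 'text/xml'}
# >>> body = b"<?xml version='1.0' encoding='utf-8'?><feed>caf\xc3\xa9</feed>"
# >>> data, enc, err = convert_to_utf8(headers, body)
#
# RFC 3023 forces 'us-ascii' for text/xml without a charset; that decode
# fails on the non-ASCII bytes, the declared 'utf-8' is tried next and
# succeeds, so enc == 'utf-8' and err is a CharacterEncodingOverride.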
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes(r'\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
    start = re.search(_s2bytes(r'<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = 'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = list(filter(match_safe_entities, entity_results))
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
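# Illustrative sketch of replace_doctype() (byte-string input assumed):
#
# >>> doc = b'<!DOCTYPE feed [\n<!ENTITY copyright "(C)">\n]>\n<feed/>'
# >>> version, data, entities = replace_doctype(doc)
#
# The DOCTYPE is not Netscape's RSS 0.91 DTD, so version is None; the
# declaration is rebuilt with only the safe ENTITY declarations, and
# entities == {'copyright': '(C)'} is precomputed for the loose parser.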
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
return _parse_georss_line(value, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = map(float, value.strip().replace(',', ' ').split())
nxt = latlons.__next__
    while True:
        try:
            t = [nxt(), nxt()][::swap and -1 or 1]
            if dims == 3:
                t.append(nxt())
        except StopIteration:
            # PEP 479 (Python 3.7+) turns a StopIteration escaping from
            # nxt() into a RuntimeError, so end the generator explicitly.
            return
        yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Point', 'coordinates': coords[0]}
except (IndexError, ValueError):
return None
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'LineString', 'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {'type': 'Polygon', 'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space-separated
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {'type': 'Box', 'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
# end geospatial parsers
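# Illustrative behavior of the helpers above (GeoRSS input is "lat lon";
# the default swap=True yields GeoJSON-style lon/lat tuples):
#
# >>> _parse_georss_point('45.256 -71.92')
# {'type': 'Point', 'coordinates': (-71.92, 45.256)}
# >>> _parse_georss_box('42.943 -71.032 43.039 -69.856')
# {'type': 'Box', 'coordinates': ((-71.032, 42.943), (-69.856, 43.039))}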
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
:return: A :class:`FeedParserDict`.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception as e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in list(result['headers'].items()))
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error) as e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error as e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error as e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', '')
if not isinstance(etag, str):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', '')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, str):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
    use_strict_parser = bool(result['encoding'])
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', '')
href = result.get('href', '')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, str) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException as e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
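# Illustrative usage of parse() (network access assumed; values shown
# are from the sample feed in the feedparser documentation):
#
# >>> d = parse('http://feedparser.org/docs/examples/atom10.xml')
# >>> d['feed']['title']
# 'Sample feed'
# >>> d.bozo        # 0 when the feed parsed as well-formed XML
# 0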
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
|
mit
| -4,372,440,117,086,031,400 | 6,820,145,005,232,509,000 | 38.895657 | 208 | 0.574702 | false |
eepgwde/pyeg0
|
gmus/GMus0.py
|
1
|
1699
|
## @file GMus0.py
# @brief Application support class for the Unofficial Google Music API.
# @author weaves
#
# @details
# This class uses @c gmusicapi.
#
# @note
# An application support class is one that uses a set of driver classes
# to provide a set of higher-level application specific methods.
#
# @see
# https://github.com/simon-weber/Unofficial-Google-Music-API
# http://unofficial-google-music-api.readthedocs.org/en/latest/
from __future__ import print_function
from GMus00 import GMus00
import logging
import ConfigParser, os
import pandas as pd
import json
from gmusicapi import Mobileclient
## Set of file paths for the configuration file.
paths = ['site.cfg', os.path.expanduser('~/share/site/.safe/gmusic.cfg')]
## Google Music API login, search and result cache.
#
# The class needs a configuration file with these contents. (The
# values of the keys must be a valid Google Play account.)
#
# <pre>
# [credentials]
# username=username\@gmail.com
# password=SomePassword9
# </pre>
class GMus0(GMus00):
## Ad-hoc method to find the indices of duplicated entries.
def duplicated(self):
# self._df = self._df.sort(['album', 'title', 'creationTimestamp'],
# ascending=[1, 1, 0])
        df = self.df[['title', 'album', 'creationTimestamp']]
df['n0'] = df['title'] + '|' + df['album']
df = df.sort(['n0','creationTimestamp'], ascending=[1, 0])
# Only rely on counts of 2.
s0 = pd.Series(df.n0)
s1 = s0.value_counts()
s2 = set( (s1[s1.values >= 2]).index )
df1 = df[df.n0.isin(s2)]
df1['d'] = df1.duplicated('n0')
s3 = list(df1[df1.d].index)
return s3
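## Illustrative usage sketch (assumes GMus00 performs the login and
## fills self.df with the library's track list; names are hypothetical).
#
# g = GMus0()
# dupes = g.duplicated() # indices of the older rows per (title, album)
# print(g.df.loc[dupes])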
|
gpl-3.0
| 4,604,474,288,467,485,700 | -6,204,629,093,287,937,000 | 30.462963 | 74 | 0.656857 | false |
samfpetersen/gnuradio
|
gr-zeromq/examples/python/server.py
|
27
|
4700
|
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
###############################################################################
# Imports
###############################################################################
from gnuradio import zeromq
from gnuradio import gr
from gnuradio import blocks
from gnuradio import analog
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import numpy
import sys
from threading import Thread
import time
###############################################################################
# GNU Radio top_block
###############################################################################
class top_block(gr.top_block):
def __init__(self, options):
gr.top_block.__init__(self)
self.options = options
# socket addresses
rpc_adr = "tcp://*:6666"
probe_adr = "tcp://*:5556"
sink_adr = "tcp://*:5555"
# the strange sampling rate gives a nice movement in the plot :P
self.samp_rate = samp_rate = 48200
# blocks
self.gr_sig_source = analog.sig_source_f(samp_rate, analog.GR_SIN_WAVE , 1000, 1, 0)
self.throttle = blocks.throttle(gr.sizeof_float, samp_rate)
self.mult = blocks.multiply_const_ff(1)
#self.zmq_sink = zeromq.rep_sink(gr.sizeof_float, 1, sink_adr)
self.zmq_sink = zeromq.pub_sink(gr.sizeof_float, 1, sink_adr)
#self.zmq_sink = zeromq.push_sink(gr.sizeof_float, 1, sink_adr)
#self.zmq_probe = zeromq.push_sink(gr.sizeof_float, 1, probe_adr)
self.zmq_probe = zeromq.pub_sink(gr.sizeof_float, 1, probe_adr)
#self.null_sink = blocks.null_sink(gr.sizeof_float)
# connects
self.connect(self.gr_sig_source, self.mult, self.throttle, self.zmq_sink)
self.connect(self.throttle, self.zmq_probe)
# ZeroMQ
self.rpc_manager = zeromq.rpc_manager()
self.rpc_manager.set_reply_socket(rpc_adr)
self.rpc_manager.add_interface("start_fg",self.start_fg)
self.rpc_manager.add_interface("stop_fg",self.stop_fg)
self.rpc_manager.add_interface("set_waveform",self.set_waveform)
self.rpc_manager.add_interface("set_k",self.mult.set_k)
self.rpc_manager.add_interface("get_sample_rate",self.throttle.sample_rate)
self.rpc_manager.start_watcher()
def start_fg(self):
print "Start Flowgraph"
try:
self.start()
except RuntimeError:
print "Can't start, flowgraph already running!"
def stop_fg(self):
print "Stop Flowgraph"
self.stop()
self.wait()
def set_waveform(self, waveform_str):
waveform = {'Constant' : analog.GR_CONST_WAVE,
'Sine' : analog.GR_SIN_WAVE,
'Cosine' : analog.GR_COS_WAVE,
'Square' : analog.GR_SQR_WAVE,
'Triangle' : analog.GR_TRI_WAVE,
'Saw Tooth' : analog.GR_SAW_WAVE}[waveform_str]
self.gr_sig_source.set_waveform(waveform)
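###############################################################################
# Example probe subscriber (illustrative sketch, not part of the original
# flowgraph; assumes pyzmq and numpy are installed)
###############################################################################
# import zmq, numpy
# context = zmq.Context()
# subscriber = context.socket(zmq.SUB)
# subscriber.connect("tcp://localhost:5556")   # matches probe_adr above
# subscriber.setsockopt(zmq.SUBSCRIBE, "")
# samples = numpy.frombuffer(subscriber.recv(), dtype=numpy.float32)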
###############################################################################
# Options Parser
###############################################################################
def parse_options():
""" Options parser. """
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
return options
###############################################################################
# Main
###############################################################################
if __name__ == "__main__":
options = parse_options()
tb = top_block(options)
try:
# keep the program running when flowgraph is stopped
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
print "Shutting down flowgraph."
tb.rpc_manager.stop_watcher()
tb.stop()
tb.wait()
tb = None
|
gpl-3.0
| 3,526,156,923,703,162,000 | -7,683,755,706,516,982,000 | 36.301587 | 92 | 0.558085 | false |
Asana/mypipe
|
avro/lang/c/jansson/doc/conf.py
|
58
|
7023
|
# -*- coding: utf-8 -*-
#
# Jansson documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 5 21:47:20 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['refcounting']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jansson'
copyright = u'2009-2011, Petri Lehtinen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1'
# The full version, including alpha/beta/rc tags.
release = '2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'c:func'
primary_domain = 'c'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Janssondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Jansson.tex', u'Jansson Documentation',
u'Petri Lehtinen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jansson', u'Jansson Documentation',
[u'Petri Lehtinen'], 1)
]
|
apache-2.0
| 8,287,041,190,649,953,000 | -1,929,336,584,794,478,800 | 31.364055 | 80 | 0.708244 | false |
bobobox/ansible
|
lib/ansible/modules/files/replace.py
|
11
|
6445
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Evan Kaufman <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: replace
author: "Evan Kaufman (@EvanK)"
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression.
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
path:
required: true
aliases: [ dest, destfile, name ]
description:
- The file to modify.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
regexp:
required: true
description:
- The regular expression to look for in the contents of the file.
Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
Uses multiline mode, which means C(^) and C($) match the beginning
and end respectively of I(each line) of the file.
replace:
required: false
description:
- The string to replace regexp matches. May contain backreferences
that will get expanded with the regexp capture groups if the regexp
matches. If not set, matches are removed entirely.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
"""
EXAMPLES = r"""
# Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
backup: yes
- replace:
path: /home/jdoe/.ssh/known_hosts
regexp: '^old\.host\.name[^\n]*\n'
owner: jdoe
group: jdoe
mode: 0644
- replace:
path: /etc/apache/ports
regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
replace: '\1 127.0.0.1:8080'
validate: '/usr/sbin/apache2ctl -f %s -t'
"""
import os
import re
import tempfile
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
def write_changes(module, contents, path):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd,'wb')
f.write(to_bytes(contents))
f.close()
validate = module.params.get('validate', None)
valid = not validate
if validate:
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % tmpfile)
valid = rc == 0
if rc != 0:
module.fail_json(msg='failed to validate: '
'rc:%s error:%s' % (rc,err))
if valid:
module.atomic_move(tmpfile, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_file_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
regexp=dict(required=True),
replace=dict(default='', type='str'),
backup=dict(default=False, type='bool'),
validate=dict(default=None, type='str'),
),
add_file_common_args=True,
supports_check_mode=True
)
params = module.params
path = os.path.expanduser(params['path'])
res_args = dict()
if os.path.isdir(path):
module.fail_json(rc=256, msg='Path %s is a directory !' % path)
if not os.path.exists(path):
module.fail_json(rc=257, msg='Path %s does not exist !' % path)
else:
f = open(path, 'rb')
contents = to_text(f.read(), errors='surrogate_or_strict')
f.close()
mre = re.compile(params['regexp'], re.MULTILINE)
result = re.subn(mre, params['replace'], contents, 0)
if result[1] > 0 and contents != result[0]:
msg = '%s replacements made' % result[1]
changed = True
if module._diff:
res_args['diff'] = {
'before_header': path,
'before': contents,
'after_header': path,
'after': result[0],
}
else:
msg = ''
changed = False
if changed and not module.check_mode:
if params['backup'] and os.path.exists(path):
res_args['backup_file'] = module.backup_local(path)
if params['follow'] and os.path.islink(path):
path = os.path.realpath(path)
write_changes(module, result[0], path)
res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
module.exit_json(**res_args)
if __name__ == '__main__':
main()
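# Illustrative sketch of the backreference expansion performed above,
# mirroring the module's MULTILINE regexp handling:
#
# >>> import re
# >>> mre = re.compile(r'^(NameVirtualHost|Listen)\s+80\s*$', re.MULTILINE)
# >>> re.subn(mre, r'\1 127.0.0.1:8080', 'Listen 80\nKeepAlive On')
# ('Listen 127.0.0.1:8080\nKeepAlive On', 1)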
|
gpl-3.0
| -5,199,579,180,886,703,000 | 4,870,717,614,529,976,000 | 31.386935 | 114 | 0.625756 | false |
captainpete/rethinkdb
|
test/memcached_workloads/multi_serial_mix.py
|
29
|
3527
|
#!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
from __future__ import print_function
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import multiprocessing, time, pickle
import memcached_workload_common, serial_mix
from vcoptparse import *
def child(opts, log_path, load, save):
# This is run in a separate process
import sys
# TODO: this overwrites existing log files
sys.stdout = sys.stderr = file(log_path, "w")
if load is None:
clone, deleted = {}, set()
else:
print("Loading from %r..." % load)
with open(load) as f:
clone, deleted = pickle.load(f)
print("Starting test against server at %s..." % opts["address"])
with memcached_workload_common.make_memcache_connection(opts) as mc:
serial_mix.test(opts, mc, clone, deleted)
if save is not None:
print("Saving to %r..." % save)
with open(save, "w") as f:
pickle.dump((clone, deleted), f)
print("Done with test.")
op = serial_mix.option_parser_for_serial_mix()
op["num_testers"] = IntFlag("--num-testers", 16)
op["load"] = StringFlag("--load", None)
op["save"] = StringFlag("--save", None)
opts = op.parse(sys.argv)
shutdown_grace_period = 15
tester_log_dir = "multi_serial_mix_out"
if not os.path.isdir(tester_log_dir): os.mkdir(tester_log_dir)
processes = []
try:
print("Starting %d child processes..." % opts["num_testers"])
print("Writing output from child processes to %r" % tester_log_dir)
for id in xrange(opts["num_testers"]):
log_path = os.path.join(tester_log_dir, "%d.txt" % id)
load_path = opts["load"] + "_%d" % id if opts["load"] is not None else None
save_path = opts["save"] + "_%d" % id if opts["save"] is not None else None
opts2 = dict(opts)
opts2["keysuffix"] = "_%d" % id # Prevent collisions between tests
process = multiprocessing.Process(target=child, args=(opts2, log_path, load_path, save_path))
process.start()
processes.append((process, id))
print("Waiting for child processes...")
start_time = time.time()
def time_remaining():
time_elapsed = time.time() - start_time
# Give subprocesses lots of extra time
return opts["duration"] * 2 - time_elapsed + 1
for process, id in processes:
tr = time_remaining()
if tr <= 0: tr = shutdown_grace_period
process.join(tr)
stuck = sorted(id for (process, id) in processes if process.is_alive())
failed = sorted(id for (process, id) in processes if not process.is_alive() and process.exitcode != 0)
if stuck or failed:
for id in stuck + failed:
with file(os.path.join(tester_log_dir, str(id) + ".txt")) as f:
for line in f:
sys.stdout.write(line)
if len(stuck) == opts["num_testers"]:
raise ValueError("All %d processes did not finish in time." % opts["num_testers"])
elif len(failed) == opts["num_testers"]:
raise ValueError("All %d processes failed." % opts["num_testers"])
else:
raise ValueError(
"Of processes [1 ... %d], the following did not finish in time: "
"%s and the following failed: %s" % (opts["num_testers"], stuck, failed)
)
finally:
for (process, id) in processes:
if process.is_alive():
process.terminate()
print("Done.")
|
agpl-3.0
| 3,539,737,092,059,249,700 | -1,587,219,451,137,934,600 | 34.626263 | 106 | 0.6093 | false |
mdmueller/ascii-profiling
|
parallel.py
|
1
|
4245
|
import timeit
import time
from astropy.io import ascii
import pandas
import numpy as np
from astropy.table import Table, Column
from tempfile import NamedTemporaryFile
import random
import string
import matplotlib.pyplot as plt
import webbrowser
def make_table(table, size=10000, n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
if str_val is None:
str_val = "abcde12345"
cols = []
for i in xrange(n_floats):
dat = np.random.uniform(low=1, high=10, size=size)
cols.append(Column(dat, name='f{}'.format(i)))
for i in xrange(n_ints):
dat = np.random.randint(low=-9999999, high=9999999, size=size)
cols.append(Column(dat, name='i{}'.format(i)))
for i in xrange(n_strs):
if str_val == 'random':
dat = np.array([''.join([random.choice(string.letters) for j in range(10)]) for k in range(size)])
else:
dat = np.repeat(str_val, size)
cols.append(Column(dat, name='s{}'.format(i)))
t = Table(cols)
if float_format is not None:
for col in t.columns.values():
if col.name.startswith('f'):
col.format = float_format
t.write(table.name, format='ascii')
output_text = []
def plot_case(n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
global table1, output_text
n_rows = (10000, 20000, 50000, 100000, 200000) # include 200000 for publish run
numbers = (1, 1, 1, 1, 1)
repeats = (3, 2, 1, 1, 1)
times_fast = []
times_fast_parallel = []
times_pandas = []
for n_row, number, repeat in zip(n_rows, numbers, repeats):
table1 = NamedTemporaryFile()
make_table(table1, n_row, n_floats, n_ints, n_strs, float_format, str_val)
t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, use_fast_converter=True)",
setup='from __main__ import ascii, table1', number=number, repeat=repeat)
times_fast.append(min(t) / number)
t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, parallel=True, use_fast_converter=True)",
setup='from __main__ import ascii, table1', number=number, repeat=repeat)
times_fast_parallel.append(min(t) / number)
t = timeit.repeat("pandas.read_csv(table1.name, sep=' ', header=0)",
setup='from __main__ import table1, pandas', number=number, repeat=repeat)
times_pandas.append(min(t) / number)
plt.loglog(n_rows, times_fast, '-or', label='io.ascii Fast-c')
plt.loglog(n_rows, times_fast_parallel, '-og', label='io.ascii Parallel Fast-c')
plt.loglog(n_rows, times_pandas, '-oc', label='Pandas')
plt.grid()
plt.legend(loc='best')
plt.title('n_floats={} n_ints={} n_strs={} float_format={} str_val={}'.format(
n_floats, n_ints, n_strs, float_format, str_val))
plt.xlabel('Number of rows')
plt.ylabel('Time (sec)')
img_file = 'graph{}.png'.format(len(output_text) + 1)
plt.savefig(img_file)
plt.clf()
text = 'Pandas to io.ascii Fast-C speed ratio: {:.2f} : 1<br/>'.format(times_fast[-1] / times_pandas[-1])
text += 'io.ascii parallel to Pandas speed ratio: {:.2f} : 1'.format(times_pandas[-1] / times_fast_parallel[-1])
output_text.append((img_file, text))
plot_case(n_floats=10, n_ints=0, n_strs=0)
plot_case(n_floats=10, n_ints=10, n_strs=10)
plot_case(n_floats=10, n_ints=10, n_strs=10, float_format='%.4f')
plot_case(n_floats=10, n_ints=0, n_strs=0, float_format='%.4f')
plot_case(n_floats=0, n_ints=0, n_strs=10)
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="'asdf asdfa'")
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="random")
plot_case(n_floats=0, n_ints=10, n_strs=0)
html_file = open('out.html', 'w')
html_file.write('<html><head><meta charset="utf-8"/><meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>')
html_file.write('</html><body><h1 style="text-align:center;">Profile of io.ascii</h1>')
for img, descr in output_text:
html_file.write('<img src="{}"><p style="font-weight:bold;">{}</p><hr>'.format(img, descr))
html_file.write('</body></html>')
html_file.close()
webbrowser.open('out.html')
|
mit
| 5,229,903,624,053,802,000 | 990,531,518,315,476,900 | 45.141304 | 122 | 0.623793 | false |
patriciohc/carga-de-xls-a-MySQL
|
Choose_file.py
|
1
|
3639
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Choose_file.ui'
#
# Created: Sat Oct 17 15:55:19 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(524, 146)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 501, 81))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.txtFile = QtGui.QLineEdit(self.verticalLayoutWidget)
self.txtFile.setObjectName(_fromUtf8("txtFile"))
self.horizontalLayout_2.addWidget(self.txtFile)
self.btChooseFile = QtGui.QPushButton(self.verticalLayoutWidget)
self.btChooseFile.setObjectName(_fromUtf8("btChooseFile"))
self.horizontalLayout_2.addWidget(self.btChooseFile)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btClose = QtGui.QPushButton(self.verticalLayoutWidget)
self.btClose.setObjectName(_fromUtf8("btClose"))
self.horizontalLayout.addWidget(self.btClose)
self.btLoadFile = QtGui.QPushButton(self.verticalLayoutWidget)
self.btLoadFile.setObjectName(_fromUtf8("btLoadFile"))
self.horizontalLayout.addWidget(self.btLoadFile)
self.verticalLayout.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 524, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.label.setText(_translate("MainWindow", "File", None))
self.btChooseFile.setText(_translate("MainWindow", "Choose", None))
self.btClose.setText(_translate("MainWindow", "Cerrar", None))
self.btLoadFile.setText(_translate("MainWindow", "Cargar Archivo", None))
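# Illustrative usage of the generated class (the usual pyuic4 pattern):
#
# if __name__ == "__main__":
#     import sys
#     app = QtGui.QApplication(sys.argv)
#     MainWindow = QtGui.QMainWindow()
#     ui = Ui_MainWindow()
#     ui.setupUi(MainWindow)
#     MainWindow.show()
#     sys.exit(app.exec_())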
|
apache-2.0
| 6,800,182,430,826,470,000 | 96,644,769,519,863,520 | 46.25974 | 82 | 0.718054 | false |
Endika/partner-contact
|
partner_firstname/tests/test_name.py
|
25
|
3241
|
# -*- coding: utf-8 -*-
# Authors: Nemry Jonathan
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""Test naming logic.
To have more accurate results, remove the ``mail`` module before testing.
"""
from .base import BaseCase
class PartnerContactCase(BaseCase):
def test_update_lastname(self):
"""Change lastname."""
self.expect(u"newlästname", self.firstname)
self.original.name = self.name
def test_update_firstname(self):
"""Change firstname."""
self.expect(self.lastname, u"newfïrstname")
self.original.name = self.name
def test_whitespace_cleanup(self):
"""Check that whitespace in name gets cleared."""
self.expect(u"newlästname", u"newfïrstname")
self.original.name = " newlästname newfïrstname "
# Need this to refresh the ``name`` field
self.original.invalidate_cache()
class PartnerCompanyCase(BaseCase):
def create_original(self):
super(PartnerCompanyCase, self).create_original()
self.original.is_company = True
def test_copy(self):
"""Copy the partner and compare the result."""
super(PartnerCompanyCase, self).test_copy()
self.expect(self.name, False, self.name)
def test_company_inverse(self):
"""Test the inverse method in a company record."""
name = u"Thïs is a Companŷ"
self.expect(name, False, name)
self.original.name = name
class UserCase(PartnerContactCase):
def create_original(self):
name = u"%s %s" % (self.lastname, self.firstname)
# Cannot create users if ``mail`` is installed
if self.mail_installed():
self.original = self.env.ref("base.user_demo")
self.original.name = name
else:
self.original = self.env["res.users"].create({
"name": name,
"login": "[email protected]"})
def test_copy(self):
"""Copy the partner and compare the result."""
# Skip if ``mail`` is installed
if not self.mail_installed():
super(UserCase, self).test_copy()
|
agpl-3.0
| -4,760,050,304,613,419,000 | 7,659,815,055,940,602,000 | 34.922222 | 77 | 0.674606 | false |
benssson/flatbuffers
|
python/flatbuffers/compat.py
|
19
|
1465
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A tiny version of `six` to help with backwards compability. """
import sys
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PY275 = sys.version_info[0:3] >= (2, 7, 5)
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = (str,)
binary_types = (bytes,bytearray)
range_func = range
memoryview_type = memoryview
struct_bool_decl = "?"
else:
string_types = (unicode,)
if PY26 or PY27:
binary_types = (str,bytearray)
else:
binary_types = (str,)
range_func = xrange
if PY26 or (PY27 and not PY275):
memoryview_type = buffer
struct_bool_decl = "<b"
else:
memoryview_type = memoryview
struct_bool_decl = "?"
# NOTE: Future Jython support may require code here (look at `six`).
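# Illustrative usage of the shims above (behaves the same on 2 and 3):
#
# from flatbuffers.compat import binary_types, range_func
# assert isinstance(b"\x00\x01", binary_types)
# assert sum(range_func(10)) == 45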
|
apache-2.0
| -744,504,120,299,736,400 | -6,130,355,269,743,188,000 | 30.847826 | 74 | 0.665529 | false |
ppanczyk/ansible
|
lib/ansible/modules/network/f5/bigip_user.py
|
10
|
18341
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_user
short_description: Manage user accounts and user attributes on a BIG-IP.
description:
- Manage user accounts and user attributes on a BIG-IP.
version_added: "2.4"
options:
full_name:
description:
- Full name of the user.
username_credential:
description:
- Name of the user to create, remove or modify.
required: True
aliases:
- name
password_credential:
description:
- Set the users password to this unencrypted value.
C(password_credential) is required when creating a new account.
shell:
description:
- Optionally set the users shell.
choices:
- bash
- none
- tmsh
partition_access:
description:
- Specifies the administrative partition to which the user has access.
C(partition_access) is required when creating a new account.
Should be in the form "partition:role". Valid roles include
C(acceleration-policy-editor), C(admin), C(application-editor), C(auditor)
C(certificate-manager), C(guest), C(irule-manager), C(manager), C(no-access)
C(operator), C(resource-admin), C(user-manager), C(web-application-security-administrator),
and C(web-application-security-editor). Partition portion of tuple should
be an existing partition or the value 'all'.
state:
description:
- Whether the account should exist or not, taking action if the state is
different from what is stated.
default: present
choices:
- present
- absent
update_password:
description:
- C(always) will allow to update passwords if the user chooses to do so.
C(on_create) will only set the password for newly created users.
default: on_create
choices:
- always
- on_create
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
- Requires BIG-IP versions >= 12.0.0
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = '''
- name: Add the user 'johnd' as an admin
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
username_credential: "johnd"
password_credential: "password"
full_name: "John Doe"
partition_access: "all:admin"
update_password: "on_create"
state: "present"
delegate_to: localhost
- name: Change the user "johnd's" role and shell
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
username_credential: "johnd"
partition_access: "NewPartition:manager"
shell: "tmsh"
state: "present"
delegate_to: localhost
- name: Make the user 'johnd' an admin and set to advanced shell
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "johnd"
partition_access: "all:admin"
shell: "bash"
state: "present"
delegate_to: localhost
- name: Remove the user 'johnd'
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "johnd"
state: "absent"
delegate_to: localhost
- name: Update password
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
username_credential: "johnd"
password_credential: "newsupersecretpassword"
delegate_to: localhost
# Note that the second time this task runs, it would fail because
# The password has been changed. Therefore, it is recommended that
# you either,
#
# * Put this in its own playbook that you run when you need to
# * Put this task in a `block`
# * Include `ignore_errors` on this task
- name: Change the Admin password
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
username_credential: "admin"
password_credential: "NewSecretPassword"
delegate_to: localhost
'''
RETURN = '''
full_name:
description: Full name of the user
returned: changed and success
type: string
sample: "John Doe"
partition_access:
description:
- List of strings containing the user's roles and which partitions they
are applied to. They are specified in the form "partition:role".
returned: changed and success
type: list
sample: "['all:admin']"
shell:
description: The shell assigned to the user account
returned: changed and success
type: string
sample: "tmsh"
'''
from distutils.version import LooseVersion
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'partitionAccess': 'partition_access',
'description': 'full_name',
}
updatables = [
'partition_access', 'full_name', 'shell', 'password_credential'
]
returnables = [
'shell', 'partition_access', 'full_name', 'username_credential'
]
api_attributes = [
'shell', 'partitionAccess', 'description', 'name', 'password'
]
@property
def partition_access(self):
"""Partition access values will require some transformation.
This operates on both user and device returned values.
        Check if the element is a string from user input in the format
        "name:role"; if it is, split it and build a dictionary from the
        parts. If the access value is a dictionary (returned from the
        device, or already processed), remove its nameReference key if
        present and append the remaining dictionary to the result list.

        :returns: list of dictionaries
"""
if self._values['partition_access'] is None:
return
result = []
part_access = self._values['partition_access']
for access in part_access:
if isinstance(access, dict):
if 'nameReference' in access:
del access['nameReference']
result.append(access)
else:
result.append(access)
if isinstance(access, str):
acl = access.split(':')
if acl[0].lower() == 'all':
acl[0] = 'all-partitions'
value = dict(
name=acl[0],
role=acl[1]
)
result.append(value)
return result
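    # Illustrative transformation performed by the property above:
    #   ['Common:manager', 'all:admin'] ->
    #   [{'name': 'Common', 'role': 'manager'},
    #    {'name': 'all-partitions', 'role': 'admin'}]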
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute])
elif api_attribute == 'password':
result[api_attribute] = self._values['password_credential']
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
def exec_module(self):
if self.is_version_less_than_13():
            manager = UnpartitionedManager(self.client)
else:
manager = PartitionedManager(self.client)
return manager.exec_module()
def is_version_less_than_13(self):
"""Checks to see if the TMOS version is less than 13
Anything less than BIG-IP 13.x does not support users
on different partitions.
:return: Bool
"""
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('13.0.0'):
return True
else:
return False
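    # For reference, distutils' LooseVersion gives the expected ordering here:
    # LooseVersion('12.1.2') < LooseVersion('13.0.0') is True, while
    # LooseVersion('13.1.0') < LooseVersion('13.0.0') is False.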
class BaseManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
if key == 'password_credential':
new_pass = getattr(self.want, key)
if self.want.update_password == 'always':
changed[key] = new_pass
else:
# We set the shell parameter to 'none' when bigip does
# not return it.
if self.want.shell == 'bash':
self.validate_shell_parameter()
if self.want.shell == 'none' and \
self.have.shell is None:
self.have.shell = 'none'
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def validate_shell_parameter(self):
"""Method to validate shell parameters.
Raise when shell attribute is set to 'bash' with roles set to
either 'admin' or 'resource-admin'.
NOTE: Admin and Resource-Admin roles automatically enable access to
all partitions, removing any other roles that the user might have
had. There are few other roles which do that but those roles,
do not allow bash.
"""
err = "Shell access is only available to " \
"'admin' or 'resource-admin' roles"
permit = ['admin', 'resource-admin']
if self.have is not None:
have = self.have.partition_access
if not any(r['role'] for r in have if r['role'] in permit):
raise F5ModuleError(err)
# This check is needed if we want to modify shell AND
# partition_access attribute.
# This check will also trigger on create.
if self.want.partition_access is not None:
want = self.want.partition_access
if not any(r['role'] for r in want if r['role'] in permit):
raise F5ModuleError(err)
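    # Illustrative example (not from the original module): a task combining
    # shell: "bash" with partition_access: "all:guest" would fail the checks
    # above, because 'guest' is not in the permitted roles list.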
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def validate_create_parameters(self):
"""Password credentials and partition access are mandatory,
when creating a user resource.
"""
if self.want.password_credential and \
self.want.update_password != 'on_create':
err = "The 'update_password' option " \
"needs to be set to 'on_create' when creating " \
"a resource with a password."
raise F5ModuleError(err)
if self.want.partition_access is None:
err = "The 'partition_access' option " \
"is required when creating a resource."
raise F5ModuleError(err)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the user")
return True
def create(self):
self.validate_create_parameters()
if self.want.shell == 'bash':
self.validate_shell_parameter()
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
class UnpartitionedManager(BaseManager):
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.auth.users.user.create(**params)
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.auth.users.user.load(name=self.want.name)
result.modify(**params)
def read_current_from_device(self):
tmp_res = self.client.api.tm.auth.users.user.load(name=self.want.name)
result = tmp_res.attrs
return Parameters(result)
def exists(self):
return self.client.api.tm.auth.users.user.exists(name=self.want.name)
def remove_from_device(self):
result = self.client.api.tm.auth.users.user.load(name=self.want.name)
if result:
result.delete()
class PartitionedManager(BaseManager):
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.auth.users.user.create(
partition=self.want.partition, **params
)
def _read_one_resource_from_collection(self):
collection = self.client.api.tm.auth.users.get_collection(
requests_params=dict(
params="$filter=partition+eq+'{0}'".format(self.want.partition)
)
)
collection = [x for x in collection if x.name == self.want.name]
if len(collection) == 1:
resource = collection.pop()
return resource
elif len(collection) == 0:
raise F5ModuleError(
"No accounts with the provided name were found"
)
else:
raise F5ModuleError(
"Multiple users with the provided name were found!"
)
def update_on_device(self):
params = self.want.api_params()
try:
resource = self._read_one_resource_from_collection()
resource.modify(**params)
except iControlUnexpectedHTTPError as ex:
            # TODO: Patch this in the F5 SDK so that I don't need this check
if 'updated successfully' not in str(ex):
raise F5ModuleError(
"Failed to update the specified user"
)
def read_current_from_device(self):
resource = self._read_one_resource_from_collection()
result = resource.attrs
return Parameters(result)
def exists(self):
collection = self.client.api.tm.auth.users.get_collection(
requests_params=dict(
params="$filter=partition+eq+'{0}'".format(self.want.partition)
)
)
collection = [x for x in collection if x.name == self.want.name]
if len(collection) == 1:
result = True
elif len(collection) == 0:
result = False
else:
raise F5ModuleError(
"Multiple users with the provided name were found!"
)
return result
def remove_from_device(self):
resource = self._read_one_resource_from_collection()
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(
required=True,
aliases=['username_credential']
),
password_credential=dict(
no_log=True,
),
partition_access=dict(
type='list'
),
full_name=dict(),
shell=dict(
choices=['none', 'bash', 'tmsh']
),
update_password=dict(
default='always',
choices=['always', 'on_create']
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gpl-3.0
| -5,159,567,149,617,251,000 | 1,415,360,345,897,547,300 | 30.842014 | 99 | 0.568126 | false |
nzavagli/UnrealPy
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/tests/unit/emr/test_emr_responses.py
|
98
|
17266
|
# Copyright (c) 2010 Jeremy Thurgood <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# NOTE: These tests only cover the very simple cases I needed to test
# for the InstanceGroup fix.
import xml.sax
from boto import handler
from boto.emr import emrobject
from boto.resultset import ResultSet
from tests.compat import unittest
JOB_FLOW_EXAMPLE = b"""
<DescribeJobFlowsResponse
xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-01-15">
<DescribeJobFlowsResult>
<JobFlows>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
<StartDateTime>2009-01-28T21:49:16Z</StartDateTime>
<State>STARTING</State>
</ExecutionStatusDetail>
<BootstrapActions>
<member>
<BootstrapActionConfig>
<ScriptBootstrapAction>
<Args/>
<Path>s3://elasticmapreduce/libs/hue/install-hue</Path>
</ScriptBootstrapAction>
<Name>Install Hue</Name>
</BootstrapActionConfig>
</member>
</BootstrapActions>
<VisibleToAllUsers>true</VisibleToAllUsers>
<SupportedProducts>
<member>Hue</member>
</SupportedProducts>
<Name>MyJobFlowName</Name>
<LogUri>mybucket/subdir/</LogUri>
<Steps>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
<State>PENDING</State>
</ExecutionStatusDetail>
<StepConfig>
<HadoopJarStep>
<Jar>MyJarFile</Jar>
<MainClass>MyMailClass</MainClass>
<Args>
<member>arg1</member>
<member>arg2</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>MyStepName</Name>
<ActionOnFailure>CONTINUE</ActionOnFailure>
</StepConfig>
</member>
</Steps>
<JobFlowId>j-3UN6WX5RRO2AG</JobFlowId>
<Instances>
<Placement>
<AvailabilityZone>us-east-1a</AvailabilityZone>
</Placement>
<SlaveInstanceType>m1.small</SlaveInstanceType>
<MasterInstanceType>m1.small</MasterInstanceType>
<Ec2KeyName>myec2keyname</Ec2KeyName>
<InstanceCount>4</InstanceCount>
<KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps>
</Instances>
</member>
</JobFlows>
</DescribeJobFlowsResult>
<ResponseMetadata>
<RequestId>9cea3229-ed85-11dd-9877-6fad448a8419</RequestId>
</ResponseMetadata>
</DescribeJobFlowsResponse>
"""
JOB_FLOW_COMPLETED = b"""
<DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
<DescribeJobFlowsResult>
<JobFlows>
<member>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<LastStateChangeReason>Steps completed</LastStateChangeReason>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
</ExecutionStatusDetail>
<BootstrapActions/>
<Name>RealJobFlowName</Name>
<LogUri>s3n://example.emrtest.scripts/jobflow_logs/</LogUri>
<Steps>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar>
<Args>
<member>s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>Setup Hadoop Debugging</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:04:22Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/20/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/19/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/18/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/17/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/16/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/15/*</member>
<member>-input</member>
<member>s3://example.emrtest.data/raw/2010/10/14/*</member>
<member>-output</member>
<member>s3://example.emrtest.crunched/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_Initial</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:04:22Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:36:18Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.crunched/*</member>
<member>-output</member>
<member>s3://example.emrtest.step1/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step1</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:36:18Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:37:51Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.crunched/*</member>
<member>-output</member>
<member>s3://example.emrtest.step2/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step2</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:37:51Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:39:32Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.step1/*</member>
<member>-output</member>
<member>s3://example.emrtest.step3/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step3</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:39:32Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:41:22Z</EndDateTime>
</ExecutionStatusDetail>
</member>
<member>
<StepConfig>
<HadoopJarStep>
<Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
<Args>
<member>-mapper</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py</member>
<member>-reducer</member>
<member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py</member>
<member>-input</member>
<member>s3://example.emrtest.step1/*</member>
<member>-output</member>
<member>s3://example.emrtest.step4/</member>
</Args>
<Properties/>
</HadoopJarStep>
<Name>testjob_step4</Name>
<ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
</StepConfig>
<ExecutionStatusDetail>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<StartDateTime>2010-10-21T01:41:22Z</StartDateTime>
<State>COMPLETED</State>
<EndDateTime>2010-10-21T01:43:03Z</EndDateTime>
</ExecutionStatusDetail>
</member>
</Steps>
<JobFlowId>j-3H3Q13JPFLU22</JobFlowId>
<Instances>
<SlaveInstanceType>m1.large</SlaveInstanceType>
<MasterInstanceId>i-64c21609</MasterInstanceId>
<Placement>
<AvailabilityZone>us-east-1b</AvailabilityZone>
</Placement>
<InstanceGroups>
<member>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<InstanceRunningCount>0</InstanceRunningCount>
<StartDateTime>2010-10-21T01:02:09Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:03Z</ReadyDateTime>
<State>ENDED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
<InstanceRequestCount>1</InstanceRequestCount>
<InstanceType>m1.large</InstanceType>
<Market>ON_DEMAND</Market>
<LastStateChangeReason>Job flow terminated</LastStateChangeReason>
<InstanceRole>MASTER</InstanceRole>
<InstanceGroupId>ig-EVMHOZJ2SCO8</InstanceGroupId>
<Name>master</Name>
</member>
<member>
<CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
<InstanceRunningCount>0</InstanceRunningCount>
<StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
<ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
<State>ENDED</State>
<EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
<InstanceRequestCount>9</InstanceRequestCount>
<InstanceType>m1.large</InstanceType>
<Market>ON_DEMAND</Market>
<LastStateChangeReason>Job flow terminated</LastStateChangeReason>
<InstanceRole>CORE</InstanceRole>
<InstanceGroupId>ig-YZHDYVITVHKB</InstanceGroupId>
<Name>slave</Name>
</member>
</InstanceGroups>
<NormalizedInstanceHours>40</NormalizedInstanceHours>
<HadoopVersion>0.20</HadoopVersion>
<MasterInstanceType>m1.large</MasterInstanceType>
<MasterPublicDnsName>ec2-184-72-153-139.compute-1.amazonaws.com</MasterPublicDnsName>
<Ec2KeyName>myubersecurekey</Ec2KeyName>
<InstanceCount>10</InstanceCount>
<KeepJobFlowAliveWhenNoSteps>false</KeepJobFlowAliveWhenNoSteps>
</Instances>
</member>
</JobFlows>
</DescribeJobFlowsResult>
<ResponseMetadata>
<RequestId>c31e701d-dcb4-11df-b5d9-337fc7fe4773</RequestId>
</ResponseMetadata>
</DescribeJobFlowsResponse>
"""
class TestEMRResponses(unittest.TestCase):
def _parse_xml(self, body, markers):
rs = ResultSet(markers)
h = handler.XmlHandler(rs, None)
xml.sax.parseString(body, h)
return rs
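    # Rough sketch of what this helper does: for a marker list such as
    # [('member', emrobject.JobFlow)], boto's SAX handler builds one JobFlow
    # per top-level <member> element, so the tests below can unpack a single
    # JobFlow from the returned ResultSet.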
def _assert_fields(self, response, **fields):
for field, expected in fields.items():
actual = getattr(response, field)
self.assertEquals(expected, actual,
"Field %s: %r != %r" % (field, expected, actual))
def test_JobFlows_example(self):
[jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE,
[('member', emrobject.JobFlow)])
self._assert_fields(jobflow,
creationdatetime='2009-01-28T21:49:16Z',
startdatetime='2009-01-28T21:49:16Z',
state='STARTING',
instancecount='4',
jobflowid='j-3UN6WX5RRO2AG',
loguri='mybucket/subdir/',
name='MyJobFlowName',
availabilityzone='us-east-1a',
slaveinstancetype='m1.small',
masterinstancetype='m1.small',
ec2keyname='myec2keyname',
keepjobflowalivewhennosteps='true')
def test_JobFlows_completed(self):
[jobflow] = self._parse_xml(JOB_FLOW_COMPLETED,
[('member', emrobject.JobFlow)])
self._assert_fields(jobflow,
creationdatetime='2010-10-21T01:00:25Z',
startdatetime='2010-10-21T01:03:59Z',
enddatetime='2010-10-21T01:44:18Z',
state='COMPLETED',
instancecount='10',
jobflowid='j-3H3Q13JPFLU22',
loguri='s3n://example.emrtest.scripts/jobflow_logs/',
name='RealJobFlowName',
availabilityzone='us-east-1b',
slaveinstancetype='m1.large',
masterinstancetype='m1.large',
ec2keyname='myubersecurekey',
keepjobflowalivewhennosteps='false')
self.assertEquals(6, len(jobflow.steps))
self.assertEquals(2, len(jobflow.instancegroups))
|
mit
| 9,078,379,829,397,085,000 | 3,346,027,110,846,919,700 | 43.5 | 99 | 0.569269 | false |
sgraham/nope
|
tools/telemetry/telemetry/image_processing/image_util_numpy_impl.py
|
6
|
6585
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import division
from telemetry.core import util
from telemetry.image_processing import histogram
from telemetry.image_processing import rgba_color
from telemetry.util import external_modules
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'png')
import png # pylint: disable=F0401
cv2 = external_modules.ImportOptionalModule('cv2')
np = external_modules.ImportRequiredModule('numpy')
def Channels(image):
return image.shape[2]
def Width(image):
return image.shape[1]
def Height(image):
return image.shape[0]
def Pixels(image):
return bytearray(np.uint8(image[:, :, ::-1]).flat) # Convert from bgr to rgb.
def GetPixelColor(image, x, y):
bgr = image[y][x]
return rgba_color.RgbaColor(bgr[2], bgr[1], bgr[0])
def WritePngFile(image, path):
if cv2 is not None:
cv2.imwrite(path, image)
else:
with open(path, "wb") as f:
metadata = {}
metadata['size'] = (Width(image), Height(image))
metadata['alpha'] = False
metadata['bitdepth'] = 8
img = image[:, :, ::-1]
pixels = img.reshape(-1).tolist()
png.Writer(**metadata).write_array(f, pixels)
def FromRGBPixels(width, height, pixels, bpp):
img = np.array(pixels, order='F', dtype=np.uint8)
img.resize((height, width, bpp))
if bpp == 4:
img = img[:, :, :3] # Drop alpha.
return img[:, :, ::-1] # Convert from rgb to bgr.
def FromPngFile(path):
if cv2 is not None:
img = cv2.imread(path, cv2.CV_LOAD_IMAGE_COLOR)
if img is None:
raise ValueError('Image at path {0} could not be read'.format(path))
return img
else:
with open(path, "rb") as f:
return FromPng(f.read())
def FromPng(png_data):
if cv2 is not None:
file_bytes = np.asarray(bytearray(png_data), dtype=np.uint8)
return cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_COLOR)
else:
width, height, pixels, meta = png.Reader(bytes=png_data).read_flat()
return FromRGBPixels(width, height, pixels, 4 if meta['alpha'] else 3)
def _SimpleDiff(image1, image2):
if cv2 is not None:
return cv2.absdiff(image1, image2)
else:
amax = np.maximum(image1, image2)
amin = np.minimum(image1, image2)
return amax - amin
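# A note on the fallback branch above: the image arrays are uint8, so a naive
# (image1 - image2) would wrap around for negative values. Taking
# maximum(a, b) - minimum(a, b) instead yields the per-channel absolute
# difference, matching what cv2.absdiff computes.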
def AreEqual(image1, image2, tolerance, likely_equal):
if image1.shape != image2.shape:
return False
self_image = image1
other_image = image2
if tolerance:
if likely_equal:
return np.amax(_SimpleDiff(image1, image2)) <= tolerance
else:
for row in xrange(Height(image1)):
if np.amax(_SimpleDiff(image1[row], image2[row])) > tolerance:
return False
return True
else:
if likely_equal:
return (self_image == other_image).all()
else:
for row in xrange(Height(image1)):
if not (self_image[row] == other_image[row]).all():
return False
return True
def Diff(image1, image2):
self_image = image1
other_image = image2
if image1.shape[2] != image2.shape[2]:
raise ValueError('Cannot diff images of differing bit depth')
if image1.shape[:2] != image2.shape[:2]:
width = max(Width(image1), Width(image2))
height = max(Height(image1), Height(image2))
    self_image = np.zeros((height, width, image1.shape[2]), np.uint8)
    other_image = np.zeros((height, width, image1.shape[2]), np.uint8)
self_image[0:Height(image1), 0:Width(image1)] = image1
other_image[0:Height(image2), 0:Width(image2)] = image2
return _SimpleDiff(self_image, other_image)
def GetBoundingBox(image, color, tolerance):
if cv2 is not None:
color = np.array([color.b, color.g, color.r])
img = cv2.inRange(image, np.subtract(color[0:3], tolerance),
np.add(color[0:3], tolerance))
count = cv2.countNonZero(img)
if count == 0:
return None, 0
contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contour = np.concatenate(contours)
return cv2.boundingRect(contour), count
else:
if tolerance:
color = np.array([color.b, color.g, color.r])
colorm = color - tolerance
colorp = color + tolerance
b = image[:, :, 0]
g = image[:, :, 1]
r = image[:, :, 2]
w = np.where(((b >= colorm[0]) & (b <= colorp[0]) &
(g >= colorm[1]) & (g <= colorp[1]) &
(r >= colorm[2]) & (r <= colorp[2])))
else:
w = np.where((image[:, :, 0] == color.b) &
(image[:, :, 1] == color.g) &
(image[:, :, 2] == color.r))
if len(w[0]) == 0:
return None, 0
return (w[1][0], w[0][0], w[1][-1] - w[1][0] + 1, w[0][-1] - w[0][0] + 1), \
len(w[0])
def Crop(image, left, top, width, height):
img_height, img_width = image.shape[:2]
if (left < 0 or top < 0 or
(left + width) > img_width or
(top + height) > img_height):
raise ValueError('Invalid dimensions')
return image[top:top + height, left:left + width]
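# Usage sketch (values are hypothetical): Crop(img, left=10, top=20,
# width=100, height=50) returns the view img[20:70, 10:110]; rows come first
# because image arrays are indexed height-major.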
def GetColorHistogram(image, ignore_color, tolerance):
if cv2 is not None:
mask = None
if ignore_color is not None:
color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
mask = ~cv2.inRange(image, np.subtract(color, tolerance),
np.add(color, tolerance))
flatten = np.ndarray.flatten
hist_b = flatten(cv2.calcHist([image], [0], mask, [256], [0, 256]))
hist_g = flatten(cv2.calcHist([image], [1], mask, [256], [0, 256]))
hist_r = flatten(cv2.calcHist([image], [2], mask, [256], [0, 256]))
else:
filtered = image.reshape(-1, 3)
if ignore_color is not None:
color = np.array([ignore_color.b, ignore_color.g, ignore_color.r])
colorm = np.array(color) - tolerance
colorp = np.array(color) + tolerance
      # Keep pixels that fall outside the ignore_color tolerance band.
      keep = ((filtered[:, 0] < colorm[0]) | (filtered[:, 0] > colorp[0]) |
              (filtered[:, 1] < colorm[1]) | (filtered[:, 1] > colorp[1]) |
              (filtered[:, 2] < colorm[2]) | (filtered[:, 2] > colorp[2]))
      filtered = np.compress(keep, filtered, axis=0)
if len(filtered[:, 0]) == 0:
return histogram.ColorHistogram(np.zeros((256)), np.zeros((256)),
np.zeros((256)), ignore_color)
hist_b = np.bincount(filtered[:, 0], minlength=256)
hist_g = np.bincount(filtered[:, 1], minlength=256)
hist_r = np.bincount(filtered[:, 2], minlength=256)
return histogram.ColorHistogram(hist_r, hist_g, hist_b, ignore_color)
|
bsd-3-clause
| 1,616,850,306,852,785,400 | -1,425,658,481,783,643,400 | 34.594595 | 80 | 0.614123 | false |
Lujeni/ansible
|
lib/ansible/modules/cloud/docker/docker_config.py
|
16
|
9206
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_config
short_description: Manage docker configs.
version_added: "2.8"
description:
- Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
  - Adds to the metadata of new configs 'ansible_key', a hashed representation of the data, which is then used
in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
unless the I(force) option is set.
- Updates to configs are performed by removing the config and creating it again.
options:
data:
description:
- The value of the config. Required when state is C(present).
type: str
data_is_b64:
description:
- If set to C(true), the data is assumed to be Base64 encoded and will be
decoded before being used.
- To use binary I(data), it is better to keep it Base64 encoded and let it
be decoded by this option.
type: bool
default: no
labels:
description:
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
- If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
type: dict
force:
description:
- Use with state C(present) to always remove and recreate an existing config.
- If C(true), an existing config will be replaced, even if it has not been changed.
type: bool
default: no
name:
description:
- The name of the config.
type: str
required: yes
state:
description:
- Set to C(present), if the config should exist, and C(absent), if it should not.
type: str
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
- docker.docker_py_2_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
- "Docker API >= 1.30"
author:
- Chris Houseknecht (@chouseknecht)
- John Hu (@ushuz)
'''
EXAMPLES = '''
- name: Create config foo (from a file on the control machine)
docker_config:
name: foo
# If the file is JSON or binary, Ansible might modify it (because
# it is first decoded and later re-encoded). Base64-encoding the
# file directly after reading it prevents this to happen.
data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
data_is_b64: true
state: present
- name: Change the config data
docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the config
two: '2'
state: present
- name: No change
docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing config
state: present
- name: Update an existing label
docker_config:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the config
one: '1'
state: present
- name: Force the (re-)creation of the config
docker_config:
name: foo
data: Goodnight everyone!
force: yes
state: present
- name: Remove config foo
docker_config:
name: foo
state: absent
'''
RETURN = '''
config_id:
description:
- The ID assigned by Docker to the config object.
returned: success and I(state) is C(present)
type: str
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''
import base64
import hashlib
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
compare_generic,
RequestException,
)
from ansible.module_utils._text import to_native, to_bytes
class ConfigManager(DockerBaseClass):
def __init__(self, client, results):
super(ConfigManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
parameters = self.client.module.params
self.name = parameters.get('name')
self.state = parameters.get('state')
self.data = parameters.get('data')
if self.data is not None:
if parameters.get('data_is_b64'):
self.data = base64.b64decode(self.data)
else:
self.data = to_bytes(self.data)
self.labels = parameters.get('labels')
self.force = parameters.get('force')
self.data_key = None
def __call__(self):
if self.state == 'present':
self.data_key = hashlib.sha224(self.data).hexdigest()
self.present()
elif self.state == 'absent':
self.absent()
def get_config(self):
''' Find an existing config. '''
try:
configs = self.client.configs(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
for config in configs:
if config['Spec']['Name'] == self.name:
return config
return None
def create_config(self):
''' Create a new config '''
config_id = None
        # We can't see the data after creation, so add a label we can use for an idempotency check
labels = {
'ansible_key': self.data_key
}
if self.labels:
labels.update(self.labels)
try:
if not self.check_mode:
config_id = self.client.create_config(self.name, self.data, labels=labels)
except APIError as exc:
self.client.fail("Error creating config: %s" % to_native(exc))
if isinstance(config_id, dict):
config_id = config_id['ID']
return config_id
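    # Minimal sketch of the resulting labels (values are illustrative): for
    # data b'Goodnight everyone!' and labels {'bar': 'baz'}, the config is
    # created with
    #   {'ansible_key': hashlib.sha224(b'Goodnight everyone!').hexdigest(),
    #    'bar': 'baz'}
    # so later runs can compare hashes instead of the unreadable config data.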
def present(self):
''' Handles state == 'present', creating or updating the config '''
config = self.get_config()
if config:
self.results['config_id'] = config['ID']
data_changed = False
attrs = config.get('Spec', {})
if attrs.get('Labels', {}).get('ansible_key'):
if attrs['Labels']['ansible_key'] != self.data_key:
data_changed = True
labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
if data_changed or labels_changed or self.force:
# if something changed or force, delete and re-create the config
self.absent()
config_id = self.create_config()
self.results['changed'] = True
self.results['config_id'] = config_id
else:
self.results['changed'] = True
self.results['config_id'] = self.create_config()
def absent(self):
''' Handles state == 'absent', removing the config '''
config = self.get_config()
if config:
try:
if not self.check_mode:
self.client.remove_config(config['ID'])
except APIError as exc:
self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
self.results['changed'] = True
def main():
argument_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
data=dict(type='str'),
data_is_b64=dict(type='bool', default=False),
labels=dict(type='dict'),
force=dict(type='bool', default=False)
)
required_if = [
('state', 'present', ['data'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
min_docker_version='2.6.0',
min_docker_api_version='1.30',
)
try:
results = dict(
changed=False,
)
ConfigManager(client, results)()
client.module.exit_json(**results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
gpl-3.0
| -4,175,368,372,472,012,300 | -2,435,482,654,648,527,400 | 29.282895 | 159 | 0.611666 | false |
FusionSP/external_chromium_org_third_party_skia
|
tools/add_codereview_message.py
|
83
|
1716
|
#!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Add message to codereview issue.
This script takes a codereview issue number as its argument and a (possibly
multi-line) message on stdin. It appends the message to the given issue.
Usage:
echo MESSAGE | %prog CODEREVIEW_ISSUE
or:
%prog CODEREVIEW_ISSUE <<EOF
MESSAGE
EOF
or:
%prog --help
"""
import optparse
import sys
import fix_pythonpath # pylint: disable=W0611
from common.py.utils import find_depot_tools # pylint: disable=W0611
import rietveld
RIETVELD_URL = 'https://codereview.chromium.org'
def add_codereview_message(issue, message):
"""Add a message to a given codereview.
Args:
codereview_url: (string) we will extract the issue number from
this url, or this could simply be the issue number.
message: (string) message to add.
"""
# Passing None for the email and password will result in a prompt or
# reuse of existing cached credentials.
my_rietveld = rietveld.Rietveld(RIETVELD_URL, email=None, password=None)
my_rietveld.add_comment(issue, message)
def main(argv):
"""main function; see module-level docstring and GetOptionParser help.
Args:
argv: sys.argv[1:]-type argument list.
"""
option_parser = optparse.OptionParser(usage=__doc__)
_, arguments = option_parser.parse_args(argv)
if len(arguments) > 1:
option_parser.error('Extra arguments.')
if len(arguments) != 1:
option_parser.error('Missing issue number.')
message = sys.stdin.read()
add_codereview_message(int(arguments[0]), message)
if __name__ == '__main__':
main(sys.argv[1:])
|
bsd-3-clause
| 2,116,493,538,569,203,000 | 1,961,317,850,466,655,500 | 23.869565 | 75 | 0.706876 | false |
ge0rgi/cinder
|
cinder/api/contrib/capabilities.py
|
2
|
2657
|
# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_messaging
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import capabilities as capabilities_view
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.volume import rpcapi
def authorize(context, action_name):
extensions.extension_authorizer('volume', action_name)(context)
class CapabilitiesController(wsgi.Controller):
"""The Capabilities controller for the OpenStack API."""
_view_builder_class = capabilities_view.ViewBuilder
def __init__(self):
# FIXME(jdg): Is it kosher that this just
# skips the volume.api and goes straight to RPC
# from here?
self.volume_api = rpcapi.VolumeAPI()
super(CapabilitiesController, self).__init__()
def show(self, req, id):
"""Return capabilities list of given backend."""
context = req.environ['cinder.context']
authorize(context, 'capabilities')
filters = {'host_or_cluster': id, 'binary': 'cinder-volume'}
services = objects.ServiceList.get_all(context, filters)
if not services:
msg = (_("Can't find service: %s") % id)
raise exception.NotFound(msg)
topic = services[0].service_topic_queue
try:
capabilities = self.volume_api.get_capabilities(context, topic,
False)
except oslo_messaging.MessagingTimeout:
raise exception.RPCTimeout(service=topic)
return self._view_builder.summary(req, capabilities, topic)
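    # Illustrative request flow (the path is an assumption based on the
    # extension alias below): GET /v2/{project_id}/capabilities/host@backend
    # looks up the matching cinder-volume service and fetches its
    # capabilities over RPC, raising RPCTimeout if the backend does not
    # answer.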
class Capabilities(extensions.ExtensionDescriptor):
"""Capabilities support."""
name = "Capabilities"
alias = "capabilities"
updated = "2015-08-31T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
Capabilities.alias,
CapabilitiesController())
resources.append(res)
return resources
|
apache-2.0
| 8,108,284,495,005,144,000 | -6,834,980,066,358,380,000 | 34.426667 | 78 | 0.66767 | false |
aherlihy/mongo-python-driver
|
pymongo/client_options.py
|
17
|
7755
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Tools to parse mongo client options."""
from bson.codec_options import _parse_codec_options
from pymongo.auth import _build_credentials_tuple
from pymongo.common import validate_boolean
from pymongo import common
from pymongo.errors import ConfigurationError
from pymongo.monitoring import _EventListeners
from pymongo.pool import PoolOptions
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import make_read_preference
from pymongo.ssl_support import get_ssl_context
from pymongo.write_concern import WriteConcern
def _parse_credentials(username, password, database, options):
"""Parse authentication credentials."""
mechanism = options.get('authmechanism', 'DEFAULT')
if username is None and mechanism != 'MONGODB-X509':
return None
source = options.get('authsource', database or 'admin')
return _build_credentials_tuple(
mechanism, source, username, password, options)
def _parse_read_preference(options):
"""Parse read preference options."""
if 'read_preference' in options:
return options['read_preference']
mode = options.get('readpreference', 0)
tags = options.get('readpreferencetags')
max_staleness = options.get('maxstalenessseconds', -1)
return make_read_preference(mode, tags, max_staleness)
def _parse_write_concern(options):
"""Parse write concern options."""
concern = options.get('w')
wtimeout = options.get('wtimeout')
j = options.get('j', options.get('journal'))
fsync = options.get('fsync')
return WriteConcern(concern, wtimeout, j, fsync)
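# For example (illustrative): MongoClient keyword arguments such as
# w='majority', wtimeout=1000, j=True end up in `options`, so this helper
# returns WriteConcern('majority', 1000, True, None).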
def _parse_read_concern(options):
"""Parse read concern options."""
concern = options.get('readconcernlevel')
return ReadConcern(concern)
def _parse_ssl_options(options):
"""Parse ssl options."""
use_ssl = options.get('ssl')
if use_ssl is not None:
validate_boolean('ssl', use_ssl)
certfile = options.get('ssl_certfile')
keyfile = options.get('ssl_keyfile')
passphrase = options.get('ssl_pem_passphrase')
ca_certs = options.get('ssl_ca_certs')
cert_reqs = options.get('ssl_cert_reqs')
match_hostname = options.get('ssl_match_hostname', True)
crlfile = options.get('ssl_crlfile')
ssl_kwarg_keys = [k for k in options
if k.startswith('ssl_') and options[k]]
    if use_ssl is False and ssl_kwarg_keys:
raise ConfigurationError("ssl has not been enabled but the "
"following ssl parameters have been set: "
"%s. Please set `ssl=True` or remove."
% ', '.join(ssl_kwarg_keys))
if ssl_kwarg_keys and use_ssl is None:
# ssl options imply ssl = True
use_ssl = True
if use_ssl is True:
ctx = get_ssl_context(
certfile, keyfile, passphrase, ca_certs, cert_reqs, crlfile)
return ctx, match_hostname
return None, match_hostname
def _parse_pool_options(options):
"""Parse connection pool options."""
max_pool_size = options.get('maxpoolsize', common.MAX_POOL_SIZE)
min_pool_size = options.get('minpoolsize', common.MIN_POOL_SIZE)
max_idle_time_ms = options.get('maxidletimems', common.MAX_IDLE_TIME_MS)
if max_pool_size is not None and min_pool_size > max_pool_size:
raise ValueError("minPoolSize must be smaller or equal to maxPoolSize")
connect_timeout = options.get('connecttimeoutms', common.CONNECT_TIMEOUT)
socket_keepalive = options.get('socketkeepalive', False)
socket_timeout = options.get('sockettimeoutms')
wait_queue_timeout = options.get('waitqueuetimeoutms')
wait_queue_multiple = options.get('waitqueuemultiple')
event_listeners = options.get('event_listeners')
appname = options.get('appname')
ssl_context, ssl_match_hostname = _parse_ssl_options(options)
return PoolOptions(max_pool_size,
min_pool_size,
max_idle_time_ms,
connect_timeout, socket_timeout,
wait_queue_timeout, wait_queue_multiple,
ssl_context, ssl_match_hostname, socket_keepalive,
_EventListeners(event_listeners),
appname)
class ClientOptions(object):
"""ClientOptions"""
def __init__(self, username, password, database, options):
self.__options = options
self.__codec_options = _parse_codec_options(options)
self.__credentials = _parse_credentials(
username, password, database, options)
self.__local_threshold_ms = options.get(
'localthresholdms', common.LOCAL_THRESHOLD_MS)
# self.__server_selection_timeout is in seconds. Must use full name for
# common.SERVER_SELECTION_TIMEOUT because it is set directly by tests.
self.__server_selection_timeout = options.get(
'serverselectiontimeoutms', common.SERVER_SELECTION_TIMEOUT)
self.__pool_options = _parse_pool_options(options)
self.__read_preference = _parse_read_preference(options)
self.__replica_set_name = options.get('replicaset')
self.__write_concern = _parse_write_concern(options)
self.__read_concern = _parse_read_concern(options)
self.__connect = options.get('connect')
self.__heartbeat_frequency = options.get(
'heartbeatfrequencyms', common.HEARTBEAT_FREQUENCY)
@property
def _options(self):
"""The original options used to create this ClientOptions."""
return self.__options
@property
def connect(self):
"""Whether to begin discovering a MongoDB topology automatically."""
return self.__connect
@property
def codec_options(self):
"""A :class:`~bson.codec_options.CodecOptions` instance."""
return self.__codec_options
@property
def credentials(self):
"""A :class:`~pymongo.auth.MongoCredentials` instance or None."""
return self.__credentials
@property
def local_threshold_ms(self):
"""The local threshold for this instance."""
return self.__local_threshold_ms
@property
def server_selection_timeout(self):
"""The server selection timeout for this instance in seconds."""
return self.__server_selection_timeout
@property
def heartbeat_frequency(self):
"""The monitoring frequency in seconds."""
return self.__heartbeat_frequency
@property
def pool_options(self):
"""A :class:`~pymongo.pool.PoolOptions` instance."""
return self.__pool_options
@property
def read_preference(self):
"""A read preference instance."""
return self.__read_preference
@property
def replica_set_name(self):
"""Replica set name or None."""
return self.__replica_set_name
@property
def write_concern(self):
"""A :class:`~pymongo.write_concern.WriteConcern` instance."""
return self.__write_concern
@property
def read_concern(self):
"""A :class:`~pymongo.read_concern.ReadConcern` instance."""
return self.__read_concern
|
apache-2.0
| -4,134,607,513,704,937,500 | -7,517,865,940,688,259,000 | 36.463768 | 79 | 0.659317 | false |
NeuralProsthesisLab/unlock
|
unlock/analysis/test/test_data_bank.py
|
1
|
3421
|
# Copyright (c) James Percent and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'jpercent'
from .. import switch
import threading
import time
import random
import unittest
class AttrTest(object):
def __init__(self):
super(AttrTest, self).__init__()
self.a = 0
self.b = 1
self.c = 2
def d(self):
self.d_value = True
def e(self, e, e1):
self.e_value = e
self.e1_value = e1
class MiscTests(unittest.TestCase):
def testSwitch(self):
correct = False
incorrect = False
val = 'v'
for case in switch(val):
if case('v'):
correct = True
break
if case('d'):
incorrect = True
break
if case ():
incorrect = True
break
self.assertTrue(correct and not incorrect)
correct = False
incorrect = False
val = 'd'
for case in switch(val):
if case('v'):
incorrect = True
break
if case('d'):
correct = True
break
if case ():
incorrect = True
break
self.assertTrue(correct and not incorrect)
correct = False
incorrect = False
val = ['efg', 'v']
for case in switch(val):
if case('v'):
incorrect = True
break
if case('d'):
incorrect = True
break
if case (['efg', 'v']):
correct = True
break
if case ():
incorrect = True
break
self.assertTrue(correct and not incorrect)
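    # The switch() helper under test emulates a C-style switch statement via
    # a generator: case(value) tests for a match and a bare case() acts as
    # the default branch. A minimal usage sketch under those assumptions:
    #
    #     for case in switch(command):
    #         if case('start'):
    #             start()
    #             break
    #         if case():
    #             handle_unknown(command)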
def getSuite():
return unittest.makeSuite(MiscTests,'test')
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
| -8,360,083,069,894,626,000 | -4,735,579,481,856,906,000 | 30.1 | 82 | 0.599532 | false |
jskew/gnuradio
|
gr-zeromq/python/zeromq/__init__.py
|
47
|
1139
|
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks for interfacing with ZeroMQ endpoints.
'''
import os
try:
from zeromq_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from zeromq_swig import *
from probe_manager import probe_manager
from rpc_manager import rpc_manager
|
gpl-3.0
| 5,146,555,721,943,828,000 | -2,708,194,897,277,224,000 | 30.638889 | 70 | 0.733977 | false |
c0cky/mediathread
|
mediathread/djangosherd/api.py
|
1
|
4549
|
# pylint: disable-msg=R0904
from tastypie import fields
from tastypie.resources import ModelResource
from mediathread.api import UserResource, TagResource
from mediathread.assetmgr.models import Asset
from mediathread.djangosherd.models import SherdNote, DiscussionIndex
from mediathread.projects.models import ProjectNote
from mediathread.taxonomy.api import TermResource
from mediathread.taxonomy.models import TermRelationship
class SherdNoteResource(ModelResource):
author = fields.ForeignKey(UserResource, 'author',
full=True, null=True, blank=True)
class Meta:
queryset = SherdNote.objects.select_related('asset').order_by("id")
excludes = ['tags', 'body', 'added', 'modified']
list_allowed_methods = []
detail_allowed_methods = []
def dehydrate(self, bundle):
try:
bundle.data['is_global_annotation'] = \
bundle.obj.is_global_annotation()
bundle.data['asset_id'] = str(bundle.obj.asset.id)
bundle.data['is_null'] = bundle.obj.is_null()
bundle.data['annotation'] = bundle.obj.annotation()
bundle.data['url'] = bundle.obj.get_absolute_url()
modified = bundle.obj.modified.strftime("%m/%d/%y %I:%M %p") \
if bundle.obj.modified else ''
bundle.data['metadata'] = {
'tags': TagResource().render_list(bundle.request,
bundle.obj.tags_split()),
'body': bundle.obj.body.strip() if bundle.obj.body else '',
'primary_type': bundle.obj.asset.primary.label,
'modified': modified,
'timecode': bundle.obj.range_as_timecode(),
'title': bundle.obj.title
}
editable = (bundle.request.user.id ==
getattr(bundle.obj, 'author_id', -1))
citable = bundle.request.GET.get('citable', '') == 'true'
# assumed: there is only one ProjectNote per annotation
reference = ProjectNote.objects.filter(
annotation__id=bundle.obj.id).first()
if reference:
# notes in a submitted response are not editable
editable = editable and not reference.project.is_submitted()
if citable:
# this is a heavy operation. don't call it unless needed
citable = reference.project.can_cite(bundle.request.course,
bundle.request.user)
bundle.data['editable'] = editable
bundle.data['citable'] = citable
termResource = TermResource()
vocabulary = {}
related = TermRelationship.objects.get_for_object(
bundle.obj).prefetch_related('term__vocabulary')
for rel in related:
if rel.term.vocabulary.id not in vocabulary:
vocabulary[rel.term.vocabulary.id] = {
'id': rel.term.vocabulary.id,
'display_name': rel.term.vocabulary.display_name,
'terms': []
}
vocabulary[rel.term.vocabulary.id]['terms'].append(
termResource.render_one(bundle.request, rel.term))
bundle.data['vocabulary'] = vocabulary.values()
except Asset.DoesNotExist:
bundle.data['asset_id'] = ''
bundle.data['metadata'] = {'title': 'Item Deleted'}
return bundle
def render_one(self, request, selection, asset_key):
# assumes user is allowed to see this note
bundle = self.build_bundle(obj=selection, request=request)
dehydrated = self.full_dehydrate(bundle)
bundle.data['asset_key'] = '%s_%s' % (asset_key,
bundle.data['asset_id'])
return self._meta.serializer.to_simple(dehydrated, None)
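    # Sketch of the resulting key (names are illustrative): for asset_key
    # 'collection' and an annotation on asset 42, bundle.data['asset_key']
    # becomes 'collection_42'.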
class DiscussionIndexResource(object):
def render_list(self, request, indicies):
collaborations = DiscussionIndex.with_permission(request, indicies)
ctx = {
'references': [{
'id': obj.collaboration.object_pk,
'title': obj.collaboration.title,
'type': obj.get_type_label(),
'url': obj.get_absolute_url(),
'modified': obj.modified.strftime("%m/%d/%y %I:%M %p")}
for obj in collaborations]}
return ctx
|
gpl-2.0
| -106,036,486,170,903,060 | -7,822,268,618,294,718,000 | 41.915094 | 79 | 0.56386 | false |
x111ong/django
|
tests/auth_tests/test_views.py
|
183
|
44561
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[
('en', 'English'),
],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
    Helper base class for all the following test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='[email protected]', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='[email protected]', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='[email protected]',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("[email protected]", mail.outbox[0].from_email)
@ignore_warnings(category=RemovedInDjango110Warning)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': '[email protected]'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://adminsite.com", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
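        # (Illustrative aside, not part of the original test: urlparse shows
        # the same behaviour -- urlparse('http://www.example:[email protected]')
        # reports 'attacker.com' as the hostname, treating everything before
        # the '@' as credentials.)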
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
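        # (Illustration, assuming the default test server: for a body that
        # contains 'http://testserver/reset/<uidb64>/<token>/', this returns
        # that full URL together with its '/reset/<uidb64>/<token>/' path.)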
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='[email protected]').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
user_email = '[email protected]'
@classmethod
def setUpTestData(cls):
cls.u1 = CustomUser.custom_objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), email='[email protected]', is_active=True,
is_admin=False, date_of_birth=datetime.date(1976, 11, 8)
)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': self.user_email})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
# then submit a new password
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
def _test_confirm_start(self):
        # create the user here instead of relying on a fixture
UUIDUser.objects.create_user(
email=self.user_email,
username='foo',
password='foo',
)
return super(UUIDUserPasswordResetTest, self)._test_confirm_start()
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
@modify_settings(MIDDLEWARE_CLASSES={
'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# if the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self, password='password'):
login_url = reverse('login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
def test_login_form_contains_request(self):
# 15198
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
# get_token() triggers CSRF token inclusion in the response
get_token(req)
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
As above, but same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
        Session without django.contrib.auth.HASH_SESSION_KEY should log in
without an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_lazy_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next='/else/where/')
expected = '/login/?next=/else/where/'
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next='/else/where/झ/')
expected = '/login/?next=/else/where/%E0%A4%9D/'
self.assertEqual(expected, login_redirect_response.url)
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[LANGUAGE_SESSION_KEY] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@modify_settings(MIDDLEWARE_CLASSES={
'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
force_text(response.content)
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='[email protected]')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@override_settings(
AUTH_USER_MODEL='auth.UUIDUser',
ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(username='uuid', email='[email protected]', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,))
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
response = self.client.get(password_change_url)
self.assertEqual(response.status_code, 200)
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(password_change_url, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
        self.assertEqual(row.user_id, 1)  # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
|
bsd-3-clause
| 7,384,450,522,389,096,000 | 1,889,390,013,376,242,000 | 42.557185 | 119 | 0.623062 | false |
brianjgeiger/osf.io
|
api_tests/taxonomies/views/test_taxonomy_list.py
|
15
|
4498
|
import pytest
from django.db.models import BooleanField, Case, When
from api.base.settings.defaults import API_BASE
from osf.models import Subject
from osf_tests.factories import SubjectFactory
@pytest.mark.django_db
class TestTaxonomy:
@pytest.fixture(autouse=True)
def subject(self):
return SubjectFactory(text='A')
@pytest.fixture(autouse=True)
def subject_other(self):
return SubjectFactory(text='Other Sub')
@pytest.fixture(autouse=True)
def subject_a(self):
return SubjectFactory(text='Z')
@pytest.fixture(autouse=True)
def subject_child_one(self, subject):
return SubjectFactory(parent=subject)
@pytest.fixture(autouse=True)
def subject_child_two(self, subject):
return SubjectFactory(parent=subject)
@pytest.fixture()
def subjects(self):
return Subject.objects.all().annotate(is_other=Case(
When(text__istartswith='other', then=True),
default=False,
output_field=BooleanField()
)).order_by('is_other', 'text')
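    # (Note: the Case/When annotation above flags subjects whose text starts
    # with 'other' so that they sort after all regular subjects; this is what
    # test_taxonomy_other_ordering below relies on.)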
@pytest.fixture()
def url_subject_list(self):
return '/{}taxonomies/'.format(API_BASE)
@pytest.fixture()
def res_subject_list(self, app, url_subject_list):
return app.get(url_subject_list)
@pytest.fixture()
def data_subject_list(self, app, res_subject_list):
return res_subject_list.json['data']
def test_taxonomy_other_ordering(self, subject_other, data_subject_list):
assert data_subject_list[-1]['id'] == subject_other._id
def test_taxonomy_success(
self, subject, subject_child_one, subject_child_two,
subjects, res_subject_list):
# make sure there are subjects to filter through
assert len(subjects) > 0
assert res_subject_list.status_code == 200
assert res_subject_list.content_type == 'application/vnd.api+json'
def test_taxonomy_text(self, subjects, data_subject_list):
for index, subject in enumerate(subjects):
if index >= len(data_subject_list):
                break  # only iterate through the first page of results
assert data_subject_list[index]['attributes']['text'] == subject.text
def test_taxonomy_parents(self, subjects, data_subject_list):
for index, subject in enumerate(subjects):
if index >= len(data_subject_list):
break
parents_ids = []
for parent in data_subject_list[index]['attributes']['parents']:
parents_ids.append(parent['id'])
if subject.parent:
assert subject.parent._id in parents_ids
def test_taxonomy_filter_top_level(
self, app, subject, subject_child_one,
subject_child_two, url_subject_list):
top_level_subjects = Subject.objects.filter(parent__isnull=True)
top_level_url = '{}?filter[parents]=null'.format(url_subject_list)
res = app.get(top_level_url)
assert res.status_code == 200
data = res.json['data']
assert len(top_level_subjects) == len(data)
assert len(top_level_subjects) > 0
for subject in data:
assert subject['attributes']['parents'] == []
def test_taxonomy_filter_by_parent(self, app, url_subject_list, subject):
children_subjects = Subject.objects.filter(parent__id=subject.id)
children_url = '{}?filter[parents]={}'.format(
url_subject_list, subject._id)
res = app.get(children_url)
assert res.status_code == 200
data = res.json['data']
assert len(children_subjects) == len(data)
for subject_ in data:
parents_ids = []
for parent in subject_['attributes']['parents']:
parents_ids.append(parent['id'])
assert subject._id in parents_ids
def test_is_deprecated(self, app, url_subject_list):
res = app.get(
'{}?version=2.6'.format(url_subject_list),
expect_errors=True)
assert res.status_code == 404
def test_taxonomy_path(self, data_subject_list):
for item in data_subject_list:
subj = Subject.objects.get(_id=item['id'])
path_parts = item['attributes']['path'].split('|')
assert path_parts[0] == subj.provider.share_title
for index, text in enumerate(
[s.text for s in subj.object_hierarchy]):
assert path_parts[index + 1] == text
|
apache-2.0
| 3,390,481,118,068,346,000 | -5,027,580,813,486,566,000 | 34.984 | 81 | 0.617386 | false |
rtindru/django
|
django/contrib/gis/gdal/geomtype.py
|
297
|
3228
|
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class OGRGeomType(object):
"Encapulates OGR Geometry Types."
wkb25bit = -2147483648
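    # (Note: this is OGR's wkb25DBit flag -- 0x80000000 read as a signed
    # 32-bit integer; adding it to a base type number marks the 2.5D variant.)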
    # Dictionary of acceptable OGRwkbGeometryType values and their string names.
_types = {0: 'Unknown',
1: 'Point',
2: 'LineString',
3: 'Polygon',
4: 'MultiPoint',
5: 'MultiLineString',
6: 'MultiPolygon',
7: 'GeometryCollection',
100: 'None',
101: 'LinearRing',
1 + wkb25bit: 'Point25D',
2 + wkb25bit: 'LineString25D',
3 + wkb25bit: 'Polygon25D',
4 + wkb25bit: 'MultiPoint25D',
5 + wkb25bit: 'MultiLineString25D',
6 + wkb25bit: 'MultiPolygon25D',
7 + wkb25bit: 'GeometryCollection25D',
}
# Reverse type dictionary, keyed by lower-case of the name.
_str_types = {v.lower(): k for k, v in _types.items()}
def __init__(self, type_input):
"Figures out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, six.string_types):
type_input = type_input.lower()
if type_input == 'geometry':
type_input = 'unknown'
num = self._str_types.get(type_input)
if num is None:
raise GDALException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
if type_input not in self._types:
raise GDALException('Invalid OGR Integer Type: %d' % type_input)
num = type_input
else:
raise TypeError('Invalid OGR input type given.')
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Returns the value of the name property."
return self.name
def __eq__(self, other):
"""
Does an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, six.string_types):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
def __ne__(self, other):
return not (self == other)
@property
def name(self):
"Returns a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Returns the Django GeometryField for this OGR Type."
s = self.name.replace('25D', '')
if s in ('LinearRing', 'None'):
return None
elif s == 'Unknown':
s = 'Geometry'
return s + 'Field'
def to_multi(self):
"""
Transform Point, LineString, Polygon, and their 25D equivalents
to their Multi... counterpart.
"""
if self.name.startswith(('Point', 'LineString', 'Polygon')):
self.num += 3
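# A minimal usage sketch (illustrative, not part of the original module):
#
#     gt = OGRGeomType('Point')   # equivalent to OGRGeomType(1)
#     gt.name                     # 'Point'
#     gt.django                   # 'PointField'
#     gt == 'point'               # True -- string comparison is case-insensitive
#     gt.to_multi(); gt.name      # 'MultiPoint'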
|
bsd-3-clause
| -4,201,873,789,643,854,300 | -670,265,902,359,270,800 | 32.978947 | 80 | 0.549566 | false |
Farkal/kivy
|
kivy/uix/gridlayout.py
|
12
|
14937
|
'''
Grid Layout
===========
.. only:: html
.. image:: images/gridlayout.gif
:align: right
.. only:: latex
.. image:: images/gridlayout.png
:align: right
.. versionadded:: 1.0.4
The :class:`GridLayout` arranges children in a matrix. It takes the available
space and divides it into columns and rows, then adds widgets to the resulting
"cells".
.. versionchanged:: 1.0.7
The implementation has changed to use the widget size_hint for calculating
column/row sizes. `uniform_width` and `uniform_height` have been removed
    and other properties have been added to give you more control.
Background
----------
Unlike many other toolkits, you cannot explicitly place a widget in a specific
column/row. Each child is automatically assigned a position determined by the
layout configuration and the child's index in the children list.
A GridLayout must always have at least one input constraint:
:attr:`GridLayout.cols` or :attr:`GridLayout.rows`. If you do not specify cols
or rows, the Layout will throw an exception.
Column Width and Row Height
---------------------------
The column width/row height are determined in 3 steps:
- The initial size is given by the :attr:`col_default_width` and
:attr:`row_default_height` properties. To customize the size of a single
column or row, use :attr:`cols_minimum` or :attr:`rows_minimum`.
- The `size_hint_x`/`size_hint_y` of the children are taken into account.
If no widgets have a size hint, the maximum size is used for all
children.
- You can force the default size by setting the :attr:`col_force_default`
or :attr:`row_force_default` property. This will force the layout to
ignore the `width` and `size_hint` properties of children and use the
default size.
Using a GridLayout
------------------
In the example below, all widgets will have an equal size. By default, the
`size_hint` is (1, 1), so a Widget will take the full size of the parent::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1'))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2'))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_1.jpg
Now, let's fix the size of Hello buttons to 100px instead of using
size_hint_x=1::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_2.jpg
Next, let's fix the row height to a specific size::
layout = GridLayout(cols=2, row_force_default=True, row_default_height=40)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_3.jpg
'''
__all__ = ('GridLayout', 'GridLayoutException')
from kivy.logger import Logger
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \
BoundedNumericProperty, ReferenceListProperty, VariableListProperty
from math import ceil
def nmax(*args):
'''(internal) Implementation of a max() function that supports None.
'''
# merge into one list
args = [x for x in args if x is not None]
return max(args)
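# (For example, nmax(None, 3, None, 5) == 5, whereas max() would choke on the
# Nones under Python 3; with no non-None arguments it still raises ValueError.)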
class GridLayoutException(Exception):
'''Exception for errors if the grid layout manipulation fails.
'''
pass
class GridLayout(Layout):
'''Grid layout class. See module documentation for more information.
'''
spacing = VariableListProperty([0, 0], length=2)
'''Spacing between children: [spacing_horizontal, spacing_vertical].
spacing also accepts a one argument form [spacing].
:attr:`spacing` is a
:class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
'''
padding = VariableListProperty([0, 0, 0, 0])
    '''Padding between the layout box and its children: [padding_left,
padding_top, padding_right, padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced NumericProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0, 0, 0].
'''
cols = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of columns in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to BoundedNumericProperty. You can no
longer set this to a negative value.
    :attr:`cols` is a :class:`~kivy.properties.BoundedNumericProperty` and
    defaults to 0.
'''
rows = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of rows in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to a BoundedNumericProperty. You can no
longer set this to a negative value.
    :attr:`rows` is a :class:`~kivy.properties.BoundedNumericProperty` and
    defaults to 0.
'''
col_default_width = NumericProperty(0)
'''Default minimum size to use for a column.
.. versionadded:: 1.0.7
:attr:`col_default_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
row_default_height = NumericProperty(0)
    '''Default minimum size to use for a row.
.. versionadded:: 1.0.7
:attr:`row_default_height` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
col_force_default = BooleanProperty(False)
'''If True, ignore the width and size_hint_x of the child and use the
default column width.
.. versionadded:: 1.0.7
:attr:`col_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
row_force_default = BooleanProperty(False)
'''If True, ignore the height and size_hint_y of the child and use the
default row height.
.. versionadded:: 1.0.7
:attr:`row_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
cols_minimum = DictProperty({})
'''List of minimum sizes for each column.
.. versionadded:: 1.0.7
:attr:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
rows_minimum = DictProperty({})
'''List of minimum sizes for each row.
.. versionadded:: 1.0.7
:attr:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
minimum_width = NumericProperty(0)
'''Minimum width needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_width` is a :class:`kivy.properties.NumericProperty` and
defaults to 0.
'''
minimum_height = NumericProperty(0)
'''Minimum height needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_height` is a :class:`kivy.properties.NumericProperty` and
defaults to 0.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Minimum size needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_size` is a
:class:`~kivy.properties.ReferenceListProperty` of
(:attr:`minimum_width`, :attr:`minimum_height`) properties.
'''
def __init__(self, **kwargs):
self._cols = self._rows = None
super(GridLayout, self).__init__(**kwargs)
self.bind(
col_default_width=self._trigger_layout,
row_default_height=self._trigger_layout,
col_force_default=self._trigger_layout,
row_force_default=self._trigger_layout,
cols=self._trigger_layout,
rows=self._trigger_layout,
parent=self._trigger_layout,
spacing=self._trigger_layout,
padding=self._trigger_layout,
children=self._trigger_layout,
size=self._trigger_layout,
pos=self._trigger_layout)
def get_max_widgets(self):
if self.cols and not self.rows:
return None
if self.rows and not self.cols:
return None
if not self.cols and not self.rows:
return None
return self.rows * self.cols
def on_children(self, instance, value):
        # if this makes it impossible to construct things with a deferred
        # method, move this check into do_layout and/or issue a warning.
smax = self.get_max_widgets()
if smax and len(value) > smax:
raise GridLayoutException(
'Too many children in GridLayout. Increase rows/cols!')
def update_minimum_size(self, *largs):
# the goal here is to calculate the minimum size of every cols/rows
# and determine if they have stretch or not
current_cols = self.cols
current_rows = self.rows
children = self.children
len_children = len(children)
# if no cols or rows are set, we can't calculate minimum size.
        # the grid must be constrained on at least one side
if not current_cols and not current_rows:
            Logger.warning('%r has no cols or rows set, '
                           'layout is not triggered.' % self)
return None
if current_cols is None:
current_cols = int(ceil(len_children / float(current_rows)))
elif current_rows is None:
current_rows = int(ceil(len_children / float(current_cols)))
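        # (whichever of cols/rows was left unset is derived from the child
        # count, e.g. 7 children with rows=2 yields ceil(7 / 2.) == 4 columns)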
current_cols = max(1, current_cols)
current_rows = max(1, current_rows)
cols = [self.col_default_width] * current_cols
cols_sh = [None] * current_cols
rows = [self.row_default_height] * current_rows
rows_sh = [None] * current_rows
# update minimum size from the dicts
# FIXME index might be outside the bounds ?
for index, value in self.cols_minimum.items():
cols[index] = value
for index, value in self.rows_minimum.items():
rows[index] = value
# calculate minimum size for each columns and rows
i = len_children - 1
for row in range(current_rows):
for col in range(current_cols):
                # don't go further if we have no children left
if i < 0:
break
# get initial information from the child
c = children[i]
shw = c.size_hint_x
shh = c.size_hint_y
w = c.width
h = c.height
# compute minimum size / maximum stretch needed
if shw is None:
cols[col] = nmax(cols[col], w)
else:
cols_sh[col] = nmax(cols_sh[col], shw)
if shh is None:
rows[row] = nmax(rows[row], h)
else:
rows_sh[row] = nmax(rows_sh[row], shh)
# next child
i = i - 1
# calculate minimum width/height needed, starting from padding +
# spacing
padding_x = self.padding[0] + self.padding[2]
padding_y = self.padding[1] + self.padding[3]
spacing_x, spacing_y = self.spacing
width = padding_x + spacing_x * (current_cols - 1)
height = padding_y + spacing_y * (current_rows - 1)
# then add the cell size
width += sum(cols)
height += sum(rows)
# remember for layout
self._cols = cols
self._rows = rows
self._cols_sh = cols_sh
self._rows_sh = rows_sh
# finally, set the minimum size
self.minimum_size = (width, height)
def do_layout(self, *largs):
self.update_minimum_size()
if self._cols is None:
return
if self.cols is None and self.rows is None:
raise GridLayoutException('Need at least cols or rows constraint.')
children = self.children
len_children = len(children)
if len_children == 0:
return
# speedup
padding_left = self.padding[0]
padding_top = self.padding[1]
spacing_x, spacing_y = self.spacing
selfx = self.x
selfw = self.width
selfh = self.height
# resolve size for each column
if self.col_force_default:
cols = [self.col_default_width] * len(self._cols)
for index, value in self.cols_minimum.items():
cols[index] = value
else:
cols = self._cols[:]
cols_sh = self._cols_sh
            cols_weight = sum([x for x in cols_sh if x])
            stretch_w = max(0, selfw - self.minimum_width)
            for index in range(len(cols)):
                # if the column has no stretch information, nothing to do
                col_stretch = cols_sh[index]
                if col_stretch is None:
                    continue
                # calculate the column stretch, and take the maximum of the
                # minimum size and the calculated stretch
                col_width = cols[index]
                col_width = max(col_width,
                                stretch_w * col_stretch / cols_weight)
                cols[index] = col_width
# same algo for rows
if self.row_force_default:
rows = [self.row_default_height] * len(self._rows)
for index, value in self.rows_minimum.items():
rows[index] = value
else:
rows = self._rows[:]
rows_sh = self._rows_sh
            rows_weight = sum([x for x in rows_sh if x])
            stretch_h = max(0, selfh - self.minimum_height)
            for index in range(len(rows)):
                # if the row has no stretch information, nothing to do
                row_stretch = rows_sh[index]
                if row_stretch is None:
                    continue
                # calculate the row stretch, and take the maximum of the
                # minimum size and the calculated stretch
                row_height = rows[index]
                row_height = max(row_height,
                                 stretch_h * row_stretch / rows_weight)
                rows[index] = row_height
# reposition every child
i = len_children - 1
y = self.top - padding_top
for row_height in rows:
x = selfx + padding_left
for col_width in cols:
if i < 0:
break
c = children[i]
c.x = x
c.y = y - row_height
c.width = col_width
c.height = row_height
i = i - 1
x = x + col_width + spacing_x
y -= row_height + spacing_y
|
mit
| 561,680,928,621,163,200 | 1,115,341,670,871,981,000 | 32.717833 | 79 | 0.602397 | false |
fdslight/fdslight
|
freenet/handlers/tundev.py
|
1
|
5566
|
#!/usr/bin/env python3
import os, sys
import pywind.evtframework.handlers.handler as handler
import freenet.lib.fn_utils as fn_utils
import freenet.lib.simple_qos as simple_qos
try:
import fcntl
except ImportError:
pass
class tun_base(handler.handler):
__creator_fd = None
# 要写入到tun的IP包
___ip_packets_for_write = []
# 写入tun设备的最大IP数据包的个数
__MAX_WRITE_QUEUE_SIZE = 1024
# 当前需要写入tun设备的IP数据包的个数
__current_write_queue_n = 0
__BLOCK_SIZE = 16 * 1024
__qos = None
def __create_tun_dev(self, name):
"""创建tun 设备
:param name:
:return fd:
"""
tun_fd = fn_utils.tuntap_create(name, fn_utils.IFF_TUN | fn_utils.IFF_NO_PI)
fn_utils.interface_up(name)
if tun_fd < 0:
raise SystemError("can not create tun device,please check your root")
return tun_fd
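    # (Note: IFF_TUN | IFF_NO_PI asks for a layer-3 tun device without the
    # 4-byte packet-information header, so reads and writes are raw IP packets.)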
@property
def creator(self):
return self.__creator_fd
def init_func(self, creator_fd, tun_dev_name, *args, **kwargs):
"""
:param creator_fd:
        :param tun_dev_name: name of the tun device
        :param subnet: required when running as the server
"""
tun_fd = self.__create_tun_dev(tun_dev_name)
if tun_fd < 3:
print("error:create tun device failed:%s" % tun_dev_name)
sys.exit(-1)
self.__creator_fd = creator_fd
self.__qos = simple_qos.qos(simple_qos.QTYPE_DST)
self.set_fileno(tun_fd)
fcntl.fcntl(tun_fd, fcntl.F_SETFL, os.O_NONBLOCK)
self.dev_init(tun_dev_name, *args, **kwargs)
return tun_fd
def dev_init(self, dev_name, *args, **kwargs):
pass
def evt_read(self):
for i in range(32):
try:
ip_packet = os.read(self.fileno, self.__BLOCK_SIZE)
except BlockingIOError:
break
self.__qos.add_to_queue(ip_packet)
self.__qos_from_tundev()
def task_loop(self):
self.__qos_from_tundev()
def __qos_from_tundev(self):
results = self.__qos.get_queue()
for ip_packet in results:
self.handle_ip_packet_from_read(ip_packet)
if not results:
self.del_loop_task(self.fileno)
else:
self.add_to_loop_task(self.fileno)
def evt_write(self):
try:
ip_packet = self.___ip_packets_for_write.pop(0)
except IndexError:
self.remove_evt_write(self.fileno)
return
self.__current_write_queue_n -= 1
try:
os.write(self.fileno, ip_packet)
except BlockingIOError:
self.__current_write_queue_n += 1
self.___ip_packets_for_write.insert(0, ip_packet)
return
''''''
def handle_ip_packet_from_read(self, ip_packet):
"""处理读取过来的IP包,重写这个方法
:param ip_packet:
:return None:
"""
pass
def handle_ip_packet_for_write(self, ip_packet):
"""处理要写入的IP包,重写这个方法
:param ip_packet:
:return new_ip_packet:
"""
pass
def error(self):
self.dev_error()
def dev_error(self):
"""重写这个方法
:return:
"""
pass
def timeout(self):
self.dev_timeout()
def dev_timeout(self):
"""重写这个方法
:return:
"""
pass
def delete(self):
self.dev_delete()
def dev_delete(self):
"""重写这个方法
:return:
"""
pass
def add_to_sent_queue(self, ip_packet):
        # drop packets beyond the limit to avoid excessive memory use
n_ip_message = self.handle_ip_packet_for_write(ip_packet)
if not n_ip_message: return
if self.__current_write_queue_n == self.__MAX_WRITE_QUEUE_SIZE:
            # drop the oldest packet to keep the queue from growing too large
self.__current_write_queue_n -= 1
self.___ip_packets_for_write.pop(0)
return
self.__current_write_queue_n += 1
self.___ip_packets_for_write.append(n_ip_message)
class tundevs(tun_base):
"""服务端的tun数据处理
"""
def dev_init(self, dev_name):
self.register(self.fileno)
self.add_evt_read(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
self.dispatcher.send_msg_to_tunnel_from_tun(ip_packet)
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
def dev_error(self):
self.delete_handler(self.fileno)
def dev_timeout(self):
pass
def handle_msg_from_tunnel(self, message):
self.add_to_sent_queue(message)
self.add_evt_write(self.fileno)
class tundevc(tun_base):
def dev_init(self, dev_name):
self.register(self.fileno)
self.add_evt_read(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
self.dispatcher.handle_msg_from_tundev(ip_packet)
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
def dev_error(self):
self.delete_handler(self.fileno)
def dev_timeout(self):
pass
def msg_from_tunnel(self, message):
self.add_to_sent_queue(message)
self.add_evt_write(self.fileno)
|
bsd-2-clause
| 3,101,656,755,947,288,600 | 1,397,184,469,954,669,300 | 23.490741 | 84 | 0.567864 | false |
nuodb/nuodb-django
|
test/auth/tests/tokens.py
|
1
|
2747
|
import sys
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
from django.utils import unittest
from django.test.testcases import skipIfDBFeature
@skipIfCustomUser
class TokenGeneratorTest(TestCase):
def test_make_token(self):
"""
Ensure that we can make a token and that it is valid
"""
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
self.assertTrue(p0.check_token(user, tk1))
@skipIfDBFeature('supports_transactions')
def test_10265(self):
"""
Ensure that the token generated for a user created in the same request
will work correctly.
"""
# See ticket #10265
user = User.objects.create_user('comebackkid', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
reload = User.objects.get(username='comebackkid')
tk2 = p0.make_token(reload)
self.assertEqual(tk1, tk2)
def test_timeout(self):
"""
        Ensure we can use the token after n days, but not after the timeout.
"""
# Uses a mocked version of PasswordResetTokenGenerator so we can change
# the value of 'today'
class Mocked(PasswordResetTokenGenerator):
def __init__(self, today):
self._today_val = today
def _today(self):
return self._today_val
user = User.objects.create_user('tokentestuser', '[email protected]', 'testpw')
p0 = PasswordResetTokenGenerator()
tk1 = p0.make_token(user)
p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
self.assertTrue(p1.check_token(user, tk1))
p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
self.assertFalse(p2.check_token(user, tk1))
@unittest.skipIf(sys.version_info[:2] >= (3, 0), "Unnecessary test with Python 3")
def test_date_length(self):
"""
Make sure we don't allow overly long dates, causing a potential DoS.
"""
user = User.objects.create_user('ima1337h4x0r', '[email protected]', 'p4ssw0rd')
p0 = PasswordResetTokenGenerator()
# This will put a 14-digit base36 timestamp into the token, which is too large.
self.assertRaises(ValueError,
p0._make_token_with_timestamp,
user, 175455491841851871349)
|
bsd-3-clause
| -1,139,411,346,703,579,600 | 6,172,335,409,410,217,000 | 38.242857 | 88 | 0.646159 | false |
Evervolv/android_external_chromium_org
|
chrome/common/extensions/docs/server2/subversion_file_system.py
|
23
|
7618
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
import xml.dom.minidom as xml
from xml.parsers.expat import ExpatError
from appengine_url_fetcher import AppEngineUrlFetcher
from docs_server_utils import StringIdentity
from file_system import FileSystem, FileNotFoundError, StatInfo, ToUnicode
from future import Future
import svn_constants
import url_constants
def _ParseHTML(html):
'''Unfortunately, the viewvc page has a stray </div> tag, so this takes care
of all mismatched tags.
'''
try:
return xml.parseString(html)
except ExpatError as e:
return _ParseHTML('\n'.join(
line for (i, line) in enumerate(html.split('\n'))
if e.lineno != i + 1))
def _InnerText(node):
'''Like node.innerText in JS DOM, but strips surrounding whitespace.
'''
text = []
if node.nodeValue:
text.append(node.nodeValue)
if hasattr(node, 'childNodes'):
for child_node in node.childNodes:
text.append(_InnerText(child_node))
return ''.join(text).strip()
def _CreateStatInfo(html):
parent_version = None
child_versions = {}
# Try all of the tables until we find the ones that contain the data (the
# directory and file versions are in different tables).
for table in _ParseHTML(html).getElementsByTagName('table'):
# Within the table there is a list of files. However, there may be some
# things beforehand; a header, "parent directory" list, etc. We will deal
# with that below by being generous and just ignoring such rows.
rows = table.getElementsByTagName('tr')
for row in rows:
cells = row.getElementsByTagName('td')
# The version of the directory will eventually appear in the soup of
# table rows, like this:
#
# <tr>
# <td>Directory revision:</td>
# <td><a href=... title="Revision 214692">214692</a> (of...)</td>
# </tr>
#
# So look out for that.
if len(cells) == 2 and _InnerText(cells[0]) == 'Directory revision:':
links = cells[1].getElementsByTagName('a')
if len(links) != 2:
raise ValueError('ViewVC assumption invalid: directory revision ' +
'content did not have 2 <a> elements, instead %s' %
_InnerText(cells[1]))
this_parent_version = _InnerText(links[0])
int(this_parent_version) # sanity check
if parent_version is not None:
          raise ValueError('There was already a parent version %s, and we '
                           'just found a second at %s' % (parent_version,
                                                          this_parent_version))
parent_version = this_parent_version
# The version of each file is a list of rows with 5 cells: name, version,
# age, author, and last log entry. Maybe the columns will change; we're
      # at the mercy of viewvc, but this constant can be easily updated.
if len(cells) != 5:
continue
name_element, version_element, _, __, ___ = cells
name = _InnerText(name_element) # note: will end in / for directories
try:
version = int(_InnerText(version_element))
except StandardError:
continue
child_versions[name] = str(version)
if parent_version and child_versions:
break
return StatInfo(parent_version, child_versions)
class _AsyncFetchFuture(object):
def __init__(self, paths, fetcher, binary, args=None):
def apply_args(path):
return path if args is None else '%s?%s' % (path, args)
# A list of tuples of the form (path, Future).
self._fetches = [(path, fetcher.FetchAsync(apply_args(path)))
for path in paths]
self._value = {}
self._error = None
self._binary = binary
def _ListDir(self, directory):
dom = xml.parseString(directory)
files = [elem.childNodes[0].data for elem in dom.getElementsByTagName('a')]
if '..' in files:
files.remove('..')
return files
def Get(self):
for path, future in self._fetches:
try:
result = future.Get()
except Exception as e:
raise FileNotFoundError(
'%s fetching %s for Get: %s' % (e.__class__.__name__, path, e))
if result.status_code == 404:
raise FileNotFoundError('Got 404 when fetching %s for Get' % path)
elif path.endswith('/'):
self._value[path] = self._ListDir(result.content)
elif not self._binary:
self._value[path] = ToUnicode(result.content)
else:
self._value[path] = result.content
if self._error is not None:
raise self._error
return self._value
class SubversionFileSystem(FileSystem):
'''Class to fetch resources from src.chromium.org.
'''
@staticmethod
def Create(branch='trunk', revision=None):
if branch == 'trunk':
svn_path = 'trunk/src/%s' % svn_constants.EXTENSIONS_PATH
else:
svn_path = 'branches/%s/src/%s' % (branch, svn_constants.EXTENSIONS_PATH)
return SubversionFileSystem(
AppEngineUrlFetcher('%s/%s' % (url_constants.SVN_URL, svn_path)),
AppEngineUrlFetcher('%s/%s' % (url_constants.VIEWVC_URL, svn_path)),
svn_path,
revision=revision)
def __init__(self, file_fetcher, stat_fetcher, svn_path, revision=None):
self._file_fetcher = file_fetcher
self._stat_fetcher = stat_fetcher
self._svn_path = svn_path
self._revision = revision
def Read(self, paths, binary=False):
args = None
if self._revision is not None:
# |fetcher| gets from svn.chromium.org which uses p= for version.
args = 'p=%s' % self._revision
return Future(delegate=_AsyncFetchFuture(paths,
self._file_fetcher,
binary,
args=args))
def Stat(self, path):
directory, filename = posixpath.split(path)
directory += '/'
if self._revision is not None:
# |stat_fetch| uses viewvc which uses pathrev= for version.
directory += '?pathrev=%s' % self._revision
try:
result = self._stat_fetcher.Fetch(directory)
except Exception as e:
# Convert all errors (typically some sort of DeadlineExceededError but
# explicitly catching that seems not to work) to a FileNotFoundError to
# reduce the exception-catching surface area of this class.
raise FileNotFoundError(
'%s fetching %s for Stat: %s' % (e.__class__.__name__, path, e))
if result.status_code != 200:
raise FileNotFoundError('Got %s when fetching %s for Stat' % (
result.status_code, path))
stat_info = _CreateStatInfo(result.content)
if stat_info.version is None:
raise ValueError('Failed to find version of dir %s' % directory)
if path.endswith('/'):
return stat_info
if filename not in stat_info.child_versions:
raise FileNotFoundError(
'%s from %s was not in child versions for Stat' % (filename, path))
return StatInfo(stat_info.child_versions[filename])
def GetIdentity(self):
# NOTE: no revision here, since it would mess up the caching of reads. It
# probably doesn't matter since all the caching classes will use the result
# of Stat to decide whether to re-read - and Stat has a ceiling of the
# revision - so when the revision changes, so might Stat. That is enough.
return '@'.join((self.__class__.__name__, StringIdentity(self._svn_path)))
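# A hedged usage sketch (not part of the original module): Create() wires up
# the two fetchers, after which Read() and Stat() take paths relative to the
# extensions root. The path below is hypothetical; directory paths must end
# with '/'.
#   file_system = SubversionFileSystem.Create(branch='trunk')
#   stat_info = file_system.Stat('manifest/')  # hypothetical directory
#   print stat_info.version, stat_info.child_versions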
|
bsd-3-clause
| 1,603,897,685,933,166,000 | -4,217,972,152,624,696,300 | 37.09 | 79 | 0.6335 | false |
sdague/home-assistant
|
homeassistant/scripts/ensure_config.py
|
18
|
1344
|
"""Script to ensure a configuration file exists."""
import argparse
import asyncio
import os
import homeassistant.config as config_util
from homeassistant.core import HomeAssistant
# mypy: allow-untyped-calls, allow-untyped-defs
def run(args):
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=("Ensure a Home Assistant config exists, creates one if necessary.")
)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument("--script", choices=["ensure_config"])
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
# Test if configuration directory exists
if not os.path.isdir(config_dir):
print("Creating directory", config_dir)
os.makedirs(config_dir)
config_path = asyncio.run(async_run(config_dir))
print("Configuration file:", config_path)
return 0
async def async_run(config_dir):
"""Make sure config exists."""
hass = HomeAssistant()
hass.config.config_dir = config_dir
path = await config_util.async_ensure_config_exists(hass)
await hass.async_stop(force=True)
return path
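# Usage sketch (an assumption, not part of the original file): this script is
# normally dispatched through the `hass` command line rather than executed
# directly, e.g.
#   hass --script ensure_config --config /path/to/config
# which ends up invoking run() above with the remaining arguments.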
|
apache-2.0
| 9,010,688,163,281,755,000 | -1,825,592,278,927,740,000 | 28.217391 | 88 | 0.681548 | false |
joelddiaz/openshift-tools
|
ansible/roles/lib_zabbix/library/zbx_trigger.py
|
13
|
8058
|
#!/usr/bin/env python
'''
ansible module for zabbix triggers
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix trigger ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_priority(priority):
''' determine priority
'''
prior = 0
if 'info' in priority:
prior = 1
elif 'warn' in priority:
prior = 2
elif 'avg' == priority or 'ave' in priority:
prior = 3
elif 'high' in priority:
prior = 4
elif 'dis' in priority:
prior = 5
return prior
def get_deps(zapi, deps):
''' get trigger dependencies
'''
results = []
for desc in deps:
content = zapi.get_content('trigger',
'get',
{'filter': {'description': desc},
'expandExpression': True,
'selectDependencies': 'triggerid',
})
if content.has_key('result'):
results.append({'triggerid': content['result'][0]['triggerid']})
return results
def get_trigger_status(inc_status):
''' Determine the trigger's status
0 is enabled
1 is disabled
'''
r_status = 0
if inc_status == 'disabled':
r_status = 1
return r_status
def get_template_id(zapi, template_name):
'''
get related templates
'''
template_ids = []
app_ids = {}
# Fetch templates by name
content = zapi.get_content('template',
'get',
{'search': {'host': template_name},
'selectApplications': ['applicationid', 'name']})
if content.has_key('result'):
template_ids.append(content['result'][0]['templateid'])
for app in content['result'][0]['applications']:
app_ids[app['name']] = app['applicationid']
return template_ids, app_ids
def main():
'''
Create a trigger in zabbix
Example:
"params": {
"description": "Processor load is too high on {HOST.NAME}",
"expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5",
"dependencies": [
{
"triggerid": "14062"
}
]
},
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
expression=dict(default=None, type='str'),
name=dict(default=None, type='str'),
description=dict(default=None, type='str'),
dependencies=dict(default=[], type='list'),
priority=dict(default='avg', type='str'),
url=dict(default=None, type='str'),
status=dict(default=None, type='str'),
state=dict(default='present', type='str'),
template_name=dict(default=None, type='str'),
hostgroup_name=dict(default=None, type='str'),
query_type=dict(default='filter', choices=['filter', 'search'], type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'trigger'
idname = "triggerid"
state = module.params['state']
tname = module.params['name']
templateid = None
if module.params['template_name']:
templateid, _ = get_template_id(zapi, module.params['template_name'])
content = zapi.get_content(zbx_class_name,
'get',
{module.params['query_type']: {'description': tname},
'expandExpression': True,
'selectDependencies': 'triggerid',
'templateids': templateid,
'group': module.params['hostgroup_name'],
})
# Get
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
# Delete
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
# Create and Update
if state == 'present':
params = {'description': tname,
'comments': module.params['description'],
'expression': module.params['expression'],
'dependencies': get_deps(zapi, module.params['dependencies']),
'priority': get_priority(module.params['priority']),
'url': module.params['url'],
'status': get_trigger_status(module.params['status']),
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
#******#
# CREATE
#******#
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('error'):
module.fail_json(msg=content['error'])
module.exit_json(changed=True, results=content['result'], state='present')
########
# UPDATE
########
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
if content.has_key('error'):
module.fail_json(msg=content['error'])
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
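# A hedged usage sketch (not part of the original module): invoking the module
# from a playbook. Parameter names follow the argument_spec above; the server
# URL, credentials, and trigger expression are placeholders.
# - name: Ensure CPU load trigger exists
#   zbx_trigger:
#     zbx_server: https://zabbix.example.com/zabbix/api_jsonrpc.php
#     zbx_user: "{{ zabbix_user }}"
#     zbx_password: "{{ zabbix_password }}"
#     name: "Processor load is too high on {HOST.NAME}"
#     expression: "{Linux server:system.cpu.load[percpu,avg1].last()}>5"
#     priority: high
#     state: present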
|
apache-2.0
| 406,859,531,512,320,800 | 745,837,290,526,188,200 | 32.857143 | 94 | 0.558451 | false |
pjdelport/django
|
tests/modeltests/update_only_fields/tests.py
|
7
|
9587
|
from __future__ import absolute_import
from django.db.models.signals import pre_save, post_save
from django.test import TestCase
from .models import Person, Employee, ProxyEmployee, Profile, Account
class UpdateOnlyFieldsTests(TestCase):
def test_update_fields_basic(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s.gender = 'M'
s.name = 'Ian'
s.save(update_fields=['name'])
s = Person.objects.get(pk=s.pk)
self.assertEqual(s.gender, 'F')
self.assertEqual(s.name, 'Ian')
def test_update_fields_deferred(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_1(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_2(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(2):
s1.save(update_fields=['pid'])
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Sara")
self.assertEqual(s2.gender, "F")
def test_update_fields_only_repeated(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.gender = 'M'
with self.assertNumQueries(1):
s1.save()
# Test that the deferred class does not remember that gender was
        # set; instead the instance should remember this.
s1 = Person.objects.only('name').get(pk=s.pk)
with self.assertNumQueries(1):
s1.save()
def test_update_fields_inheritance_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('name').get(pk=e1.pk)
e1.name = 'Linda'
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).name,
'Linda')
def test_update_fields_fk_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile').get(pk=e1.pk)
e1.profile = profile_receptionist
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
e1.profile_id = profile_boss.pk
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
def test_select_related_only_interaction(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
profile_boss.name = 'Clerk'
profile_boss.salary = 1000
profile_boss.save()
# The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
# overwritten.
with self.assertNumQueries(1):
e1.profile.save()
reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
self.assertEqual(reloaded_profile.name, profile_boss.name)
self.assertEqual(reloaded_profile.salary, 3000)
def test_update_fields_m2m(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
a1 = Account.objects.create(num=1)
a2 = Account.objects.create(num=2)
e1.accounts = [a1,a2]
with self.assertRaises(ValueError):
e1.save(update_fields=['accounts'])
def test_update_fields_inheritance(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
with self.assertNumQueries(1):
e3.profile = profile_boss
e3.save(update_fields=['profile_id'])
e4 = Employee.objects.get(pk=e3.pk)
self.assertEqual(e4.profile, profile_boss)
self.assertEqual(e4.profile_id, profile_boss.pk)
def test_update_fields_inheritance_with_proxy_model(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = ProxyEmployee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
def test_update_fields_signals(self):
p = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
p.save(update_fields=['name'])
self.assertEqual(len(pre_save_data), 1)
self.assertEqual(len(pre_save_data[0]), 1)
self.assertTrue('name' in pre_save_data[0])
self.assertEqual(len(post_save_data), 1)
self.assertEqual(len(post_save_data[0]), 1)
self.assertTrue('name' in post_save_data[0])
def test_update_fields_incorrect_params(self):
s = Person.objects.create(name='Sara', gender='F')
with self.assertRaises(ValueError):
s.save(update_fields=['first_name'])
with self.assertRaises(ValueError):
s.save(update_fields="name")
def test_empty_update_fields(self):
s = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
# Save is skipped.
with self.assertNumQueries(0):
s.save(update_fields=[])
# Signals were skipped, too...
self.assertEqual(len(pre_save_data), 0)
self.assertEqual(len(post_save_data), 0)
def test_num_queries_inheritance(self):
s = Employee.objects.create(name='Sara', gender='F')
s.employee_num = 1
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['employee_num'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.employee_num, 1)
self.assertEqual(s.name, 'Sara')
s.employee_num = 2
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['name'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.name, 'Emily')
self.assertEqual(s.employee_num, 1)
# A little sanity check that we actually did updates...
self.assertEqual(Employee.objects.count(), 1)
self.assertEqual(Person.objects.count(), 1)
with self.assertNumQueries(2):
s.save(update_fields=['name', 'employee_num'])
|
bsd-3-clause
| 3,807,988,312,176,709,600 | 3,770,326,936,686,988,000 | 36.449219 | 93 | 0.609054 | false |
sserrot/champion_relationships
|
venv/Lib/site-packages/pygments/styles/vim.py
|
4
|
1976
|
# -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
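# A brief usage sketch (an assumption, not part of the original file): the
# style is selected by name through a formatter.
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   print(highlight("print('hi')", PythonLexer(), HtmlFormatter(style='vim')))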
|
mit
| 1,417,919,265,468,610,800 | 8,975,401,244,024,920,000 | 30.365079 | 70 | 0.448887 | false |
durden/dayonetools
|
dayonetools/services/habit_list.py
|
1
|
10350
|
"""
This module provides a way to import data from the Habit List iPhone
application (http://habitlist.com/) into Day One Journal
(http://dayoneapp.com/) entries.
To use this module you must first do a manual export of your data from Habit
list. This can be done by the following:
- Open Habit List iPhone app
- Click the 'gear' icon for settings at the bottom of the main 'Today' view
- Choose the 'Export Data' option
- E-mail the data to yourself
- Copy and paste the e-mail contents into a file of your choosing
        - Remove the 'sent from iPhone' line at the end of your e-mail;
          otherwise the script will NOT process the JSON data.
- DO NOT REMOVE THE LAST TWO EMPTY LINES OF THE E-MAIL. WE CURRENTLY
HAVE A BUG THAT EXPECTS THESE LINES.
- You can choose to optionally remove the first few lines of the e-mail
that are not JSON data, everything up to the first '[' character.
- Again, this is optional because this module will attempt to ignore
any non-JSON data at the START of a file.
At this point, you are ready to do the actual conversion from JSON to Day One
entries. So, you should check all the 'settings' in this module for things you
would like to change:
- HEADER_FOR_DAY_ONE_ENTRIES
- DAYONE_ENTRIES
- ENTRY_TEMPLATE
- TIMEZONE
- Make sure to choose the timezone of your iPhone because the Habit
List app stores all timezones in UTC and you'll want to convert this
to the timezone your iPhone used at the time you completed the habit.
This will ensure your Day One entries match the time you completed
the task and also prevent a habit from showing up more than once per
day which can happen with UTC time if you complete a habit late in
one day and early in the next, etc.
- You can find a list of available timezone strings here:
- http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
Next, you can run this module with your exported JSON data as an argument like
so:
- python services/habit_list.py -f habit_list_data.json -t
Also, it's encouraged to run this with the '-t' option first so that all your
Day One entries will be created in a local directory called 'test.' This will
allow you to inspect the conversion. You can manually copy a few select
entries into your Day One 'entries/' folder to ensure you approve of the
formatting and can easily make any formatting adjustments. Then, you can run
this module again without the '-t' to fully import Habit List entries into Day
One.
"""
import argparse
import collections
from datetime import datetime
import json
import os
import re
import uuid
from dateutil import tz
from dayonetools.services import convert_to_dayone_date_string
DAYONE_ENTRIES = '/Users/durden/Dropbox/Apps/Day One/Journal.dayone/entries/'
# This text will be inserted into the first line of all entries created, set to
# '' to remove this completely.
HEADER_FOR_DAYONE_ENTRIES = 'Habit List entry'
# Note the strange lack of indentation on the {entry_text} b/c day one will
# display special formatting to text that is indented, which we want to avoid.
ENTRY_TEMPLATE = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Creation Date</key>
<date>{date}</date>
<key>Entry Text</key>
<string> {entry_title}
<![CDATA[
{habits}]]>
#habits #habit_list
</string>
<key>Starred</key>
<false/>
<key>Tags</key>
<array>
<string>habits</string>
<string>habit_list</string>
</array>
<key>UUID</key>
<string>{uuid_str}</string>
</dict>
</plist>
"""
TIMEZONE = 'America/Chicago'
def _parse_args():
"""Parse sys.argv arguments"""
parser = argparse.ArgumentParser(
description='Export Habit List data to Day One')
parser.add_argument('-f', '--file', action='store',
dest='input_file', required=True,
help='JSON file to import from')
parser.add_argument('-v', '--verbose', default=False, action='store_true',
dest='verbose', required=False,
help='Verbose debugging information')
parser.add_argument('-t', '--test', default=False, action='store_true',
dest='test', required=False,
help=('Test import by creating Day one files in local '
'directory for inspect'))
def _datetime(str_):
"""Convert date string in YYYY-MM-DD format to datetime object"""
if not str_:
return None
try:
date = datetime.strptime(str_, '%Y-%m-%d')
except ValueError:
msg = 'Invalid date format, should be YYYY-MM-DD'
raise argparse.ArgumentTypeError(msg)
return date.replace(tzinfo=_user_time_zone())
parser.add_argument('-s', '--since', type=_datetime,
help=('Only process entries starting with YYYY-MM-DD '
'and newer'))
return vars(parser.parse_args())
def _user_time_zone():
"""Get default timezone for user"""
try:
return tz.gettz(TIMEZONE)
except Exception as err:
print 'Failed getting timezone, check your TIMEZONE variable'
raise
def _user_time_zone_date(dt, user_time_zone, utc_time_zone):
"""
Convert given datetime string into a yyyy-mm-dd string taking into
account the user time zone
Keep in mind that this conversion might change the actual day if the
habit was entered 'early' or 'late' in the day. This is correct because
the user entered the habit in their own timezone, but the app stores this
internally (and exports) in utc. So, here we are effectively converting
the time back to when the user actually entered it, based on the timezone
the user claims they were in.
"""
# We know habit list stores in UTC so don't need the timezone info
dt = dt.split('+')[0].strip()
dtime_obj = datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')
# Tell native datetime object we are using UTC, then we need to convert
# that UTC time into the user's timezone BEFORE stripping off the time
# to make sure the year, month, and date take into account timezone
# differences.
utc = dtime_obj.replace(tzinfo=utc_time_zone)
return utc.astimezone(user_time_zone)
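# A worked example (illustrative, not part of the original file): a habit
# completed at '2013-01-02 03:30:00 +0000' UTC falls on 2013-01-01 for a user
# in America/Chicago (UTC-6 in winter), so the entry is filed under the
# previous day:
#   dt_obj = _user_time_zone_date('2013-01-02 03:30:00 +0000',
#                                 tz.gettz('America/Chicago'), tz.gettz('UTC'))
#   dt_obj.strftime('%Y-%m-%d')  # -> '2013-01-01'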
def _habits_to_markdown(habits):
"""Create markdown list of habits"""
# FIXME: This is inefficient but not sure of a good way to use join since
    # we want to add a character to the beginning and end of each string in the list.
markdown = ''
for habit, dt_obj in habits:
markdown += '- [%02d:%02d] %s\n' % (dt_obj.hour, dt_obj.minute, habit)
return markdown
def create_habitlist_entry(directory, day_str, habits, verbose):
"""Create day one file entry for given habits, date pair"""
# Create unique uuid without any specific machine information
    # (uuid4() vs. uuid1()) and strip any '-' characters to be
# consistent with dayone format.
uuid_str = re.sub('-', '', str(uuid.uuid4()))
file_name = '%s.doentry' % (uuid_str)
full_file_name = os.path.join(directory, file_name)
date = convert_to_dayone_date_string(day_str)
habits = _habits_to_markdown(habits)
entry = {'entry_title': HEADER_FOR_DAYONE_ENTRIES,
'habits': habits,'date': date, 'uuid_str': uuid_str}
with open(full_file_name, 'w') as file_obj:
text = ENTRY_TEMPLATE.format(**entry)
file_obj.write(text)
if verbose:
print 'Created entry for %s: %s' % (date, file_name)
def parse_habits_file(filename, start_date=None):
"""
Parse habits json file and return dict of data organized by day
start_date can be a datetime object used only to return habits that were
started on or after start_date
"""
with open(filename, 'r') as file_obj:
# FIXME: This expects 3 lines of junk at the beginning of the file, but
# we could just read until we find '[' and ignore up until that point.
junk = file_obj.readline()
junk = file_obj.readline()
junk = file_obj.readline()
# FIXME: For my sample this is about 27kb of memory
_json = file_obj.read()
# FIXME: Downside here is that we assume the user was in the same timezone
# for every habit. However, it's possible that some of the habits were
# entered while the user was traveling in a different timezone, etc.
iphone_time_zone = _user_time_zone()
utc_time_zone = tz.gettz('UTC')
# Use a set b/c we can only do each habit once a day
habits = collections.defaultdict(set)
# FIXME: Maybe optimize this to not hold it all in memory
# We have to parse all json and return it b/c the data is organized by
# habit and we need it organized by date. So, we can't use a generator or
# anything to yield values as they come b/c we won't know if we've parsed
# the entire day until all JSON is parsed.
# FIXME: Should have something to catch ValueError exceptions around this
# so we can show the line with the error if something is wrong.
for habit in json.loads(_json):
name = habit['name']
for dt in habit['completed']:
dt_obj = _user_time_zone_date(dt, iphone_time_zone, utc_time_zone)
if start_date is None or dt_obj >= start_date:
            # Habits will be organized by day, then each one will have its
# own time.
day_str = dt_obj.strftime('%Y-%m-%d')
habits[day_str].add((name, dt_obj))
return habits
def main():
args = _parse_args()
if args['test']:
directory = './test'
try:
os.mkdir(directory)
except OSError as err:
print 'Warning: %s' % (err)
else:
directory = DAYONE_ENTRIES
habits = parse_habits_file(args['input_file'], args['since'])
for day_str, days_habits in habits.iteritems():
create_habitlist_entry(directory, day_str, days_habits, args['verbose'])
if __name__ == '__main__':
main()
|
mit
| -1,002,534,995,162,792,200 | -5,617,778,238,840,262,000 | 35.315789 | 102 | 0.648792 | false |
memtoko/django
|
django/template/loaders/cached.py
|
20
|
3031
|
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template.base import Template, TemplateDoesNotExist
from django.utils.encoding import force_bytes
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.template_cache = {}
self.find_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super(Loader, self).__init__(engine)
def cache_key(self, template_name, template_dirs):
if template_dirs:
# If template directories were specified, use a hash to differentiate
return '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()])
else:
return template_name
def find_template(self, name, dirs=None):
"""
Helper method. Lookup the template :param name: in all the configured loaders
"""
key = self.cache_key(name, dirs)
try:
result = self.find_template_cache[key]
except KeyError:
result = None
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
except TemplateDoesNotExist:
pass
else:
origin = self.engine.make_origin(display_name, loader, name, dirs)
result = template, origin
break
self.find_template_cache[key] = result
if result:
return result
else:
self.template_cache[key] = TemplateDoesNotExist
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
key = self.cache_key(template_name, template_dirs)
template_tuple = self.template_cache.get(key)
# A cached previous failure:
if template_tuple is TemplateDoesNotExist:
raise TemplateDoesNotExist
elif template_tuple is None:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = Template(template, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
self.template_cache[key] = (template, origin)
self.template_cache[key] = (template, None)
return self.template_cache[key]
def reset(self):
"Empty the template cache."
self.template_cache.clear()
self.find_template_cache.clear()
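# A configuration sketch (an assumption, not part of the original file): the
# cached loader wraps the other loaders in the TEMPLATES setting, e.g.
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'OPTIONS': {
#         'loaders': [
#             ('django.template.loaders.cached.Loader', [
#                 'django.template.loaders.filesystem.Loader',
#                 'django.template.loaders.app_directories.Loader',
#             ]),
#         ],
#     },
# }]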
|
bsd-3-clause
| 1,969,149,278,046,936,300 | -328,674,086,071,408,640 | 38.363636 | 108 | 0.604091 | false |
jakevdp/pelican-plugins
|
static_comments/static_comments.py
|
72
|
1216
|
# -*- coding: utf-8 -*-
import codecs
import logging
import markdown
import os
logger = logging.getLogger(__name__)
from pelican import signals
def initialized(pelican):
from pelican.settings import DEFAULT_CONFIG
DEFAULT_CONFIG.setdefault('STATIC_COMMENTS', False)
    DEFAULT_CONFIG.setdefault('STATIC_COMMENTS_DIR', 'comments')
if pelican:
pelican.settings.setdefault('STATIC_COMMENTS', False)
pelican.settings.setdefault('STATIC_COMMENTS_DIR', 'comments')
def add_static_comments(gen, metadata):
if gen.settings['STATIC_COMMENTS'] != True:
return
if not 'slug' in metadata:
logger.warning("static_comments: "
"cant't locate comments file without slug tag in the article")
return
fname = os.path.join(gen.settings['STATIC_COMMENTS_DIR'],
metadata['slug'] + ".md")
if not os.path.exists(fname):
return
input_file = codecs.open(fname, mode="r", encoding="utf-8")
text = input_file.read()
html = markdown.markdown(text)
metadata['static_comments'] = html
def register():
signals.initialized.connect(initialized)
signals.article_generator_context.connect(add_static_comments)
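# A configuration sketch (an assumption, not part of the original file):
# enable the plugin in pelicanconf.py and keep one markdown file per article
# slug.
#   PLUGINS = ['static_comments']
#   STATIC_COMMENTS = True
#   STATIC_COMMENTS_DIR = 'comments'   # comments/<slug>.md per article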
|
agpl-3.0
| -4,967,428,531,912,292,000 | -1,118,495,116,645,418,200 | 25.434783 | 78 | 0.675987 | false |
log2timeline/dfvfs
|
dfvfs/vfs/apfs_file_entry.py
|
2
|
8402
|
# -*- coding: utf-8 -*-
"""The APFS file entry implementation."""
from dfdatetime import apfs_time as dfdatetime_apfs_time
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import apfs_path_spec
from dfvfs.vfs import file_entry
class APFSDirectory(file_entry.Directory):
"""File system directory that uses pyfsapfs."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
APFSPathSpec: APFS path specification.
"""
try:
fsapfs_file_entry = self._file_system.GetAPFSFileEntryByPathSpec(
self.path_spec)
except errors.PathSpecError:
return
location = getattr(self.path_spec, 'location', None)
for fsapfs_sub_file_entry in fsapfs_file_entry.sub_file_entries:
directory_entry = fsapfs_sub_file_entry.name
if not location or location == self._file_system.PATH_SEPARATOR:
directory_entry = self._file_system.JoinPath([directory_entry])
else:
directory_entry = self._file_system.JoinPath([
location, directory_entry])
yield apfs_path_spec.APFSPathSpec(
identifier=fsapfs_sub_file_entry.identifier, location=directory_entry,
parent=self.path_spec.parent)
class APFSFileEntry(file_entry.FileEntry):
"""File system file entry that uses pyfsapfs."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_APFS
# Mappings of APFS file types to dfVFS file entry types.
_ENTRY_TYPES = {
0x1000: definitions.FILE_ENTRY_TYPE_PIPE,
0x2000: definitions.FILE_ENTRY_TYPE_DEVICE,
0x4000: definitions.FILE_ENTRY_TYPE_DIRECTORY,
0x6000: definitions.FILE_ENTRY_TYPE_DEVICE,
0x8000: definitions.FILE_ENTRY_TYPE_FILE,
0xa000: definitions.FILE_ENTRY_TYPE_LINK,
0xc000: definitions.FILE_ENTRY_TYPE_SOCKET,
0xe000: definitions.FILE_ENTRY_TYPE_WHITEOUT}
def __init__(
self, resolver_context, file_system, path_spec, fsapfs_file_entry=None,
is_root=False, is_virtual=False):
"""Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
fsapfs_file_entry (Optional[pyfsapfs.file_entry]): APFS file entry.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
Raises:
BackEndError: if the pyfsapfs file entry is missing.
"""
if not fsapfs_file_entry:
fsapfs_file_entry = file_system.GetAPFSFileEntryByPathSpec(path_spec)
if not fsapfs_file_entry:
raise errors.BackEndError('Missing pyfsapfs file entry.')
super(APFSFileEntry, self).__init__(
resolver_context, file_system, path_spec, is_root=is_root,
is_virtual=is_virtual)
self._fsapfs_file_entry = fsapfs_file_entry
self.entry_type = self._ENTRY_TYPES.get(
fsapfs_file_entry.file_mode & 0xf000, None)
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
APFSDirectory: a directory.
"""
if self._directory is None:
self._directory = APFSDirectory(self._file_system, self.path_spec)
return self._directory
def _GetLink(self):
"""Retrieves the link.
Returns:
str: path of the linked file.
"""
if self._link is None:
self._link = self._fsapfs_file_entry.symbolic_link_target
if self._link and self._link[0] != self._file_system.PATH_SEPARATOR:
# TODO: make link absolute.
self._link = '/{0:s}'.format(self._link)
return self._link
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = super(APFSFileEntry, self)._GetStat()
# Ownership and permissions stat information.
stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff
stat_object.uid = self._fsapfs_file_entry.owner_identifier
stat_object.gid = self._fsapfs_file_entry.group_identifier
# Other stat information.
stat_object.ino = self._fsapfs_file_entry.identifier
stat_object.fs_type = 'APFS'
return stat_object
def _GetSubFileEntries(self):
"""Retrieves a sub file entries generator.
Yields:
APFSFileEntry: a sub file entry.
"""
if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
directory = self._GetDirectory()
for path_spec in directory.entries:
yield APFSFileEntry(
self._resolver_context, self._file_system, path_spec)
@property
def access_time(self):
"""dfdatetime.DateTimeValues: access time or None if not available."""
timestamp = self._fsapfs_file_entry.get_access_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def added_time(self):
"""dfdatetime.DateTimeValues: added time or None if not available."""
timestamp = self._fsapfs_file_entry.get_added_time_as_integer()
if timestamp is None:
return None
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def change_time(self):
"""dfdatetime.DateTimeValues: change time or None if not available."""
timestamp = self._fsapfs_file_entry.get_inode_change_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def creation_time(self):
"""dfdatetime.DateTimeValues: creation time or None if not available."""
timestamp = self._fsapfs_file_entry.get_creation_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = self._fsapfs_file_entry.get_modification_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def name(self):
"""str: name of the file entry, which does not include the full path."""
# The root directory file name is typically 'root', dfVFS however uses ''.
if self._is_root:
return ''
return self._fsapfs_file_entry.name
@property
def size(self):
"""int: size of the file entry in bytes or None if not available."""
return self._fsapfs_file_entry.size
def GetAPFSFileEntry(self):
"""Retrieves the APFS file entry.
Returns:
pyfsapfs.file_entry: APFS file entry.
"""
return self._fsapfs_file_entry
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
APFSFileEntry: linked file entry or None if not available.
"""
link = self._GetLink()
if not link:
return None
# TODO: is there a way to determine the identifier here?
link_identifier = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=link, parent=parent_path_spec)
is_root = bool(
link == self._file_system.LOCATION_ROOT or
link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
APFSFileEntry: parent file entry or None if not available.
"""
parent_location = None
location = getattr(self.path_spec, 'location', None)
if location is not None:
parent_location = self._file_system.DirnamePath(location)
if parent_location == '':
parent_location = self._file_system.PATH_SEPARATOR
parent_identifier = self._fsapfs_file_entry.parent_identifier
if parent_identifier is None:
return None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=parent_location, identifier=parent_identifier,
parent=parent_path_spec)
is_root = bool(
parent_location == self._file_system.LOCATION_ROOT or
parent_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
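# A hedged usage sketch (not part of the original module): APFS file entries
# are normally obtained through the path specification resolver rather than
# constructed directly. The parent chain below (OS file -> raw image ->
# APFS container -> APFS) is an assumption about the image layout.
#   from dfvfs.path import (os_path_spec, raw_path_spec,
#                           apfs_container_path_spec)
#   from dfvfs.resolver import resolver
#   os_spec = os_path_spec.OSPathSpec(location='/tmp/apfs.dmg')
#   raw_spec = raw_path_spec.RawPathSpec(parent=os_spec)
#   container_spec = apfs_container_path_spec.APFSContainerPathSpec(
#       location='/apfs1', parent=raw_spec)
#   fs_spec = apfs_path_spec.APFSPathSpec(location='/', parent=container_spec)
#   file_entry = resolver.Resolver.OpenFileEntry(fs_spec)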
|
apache-2.0
| 12,656,503,410,522,924 | 7,027,773,100,204,883,000 | 31.820313 | 80 | 0.685075 | false |
theflofly/tensorflow
|
tensorflow/python/keras/keras_parameterized.py
|
10
|
12005
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import unittest
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class TestCase(test.TestCase, parameterized.TestCase):
def tearDown(self):
keras.backend.clear_session()
super(TestCase, self).tearDown()
# TODO(kaftan): Possibly enable 'subclass_custom_build' when tests begin to pass
# it. Or perhaps make 'subclass' always use a custom build method.
def run_with_all_model_types(
test_or_class=None,
exclude_models=None):
"""Execute the decorated test with all Keras model types.
This decorator is intended to be applied either to individual test methods in
a `keras_parameterized.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test
method (or all test methods in the class) to be executed multiple times - once
for each Keras model type.
The Keras model types are: ['functional', 'subclass', 'sequential']
Note: if stacking this decorator with absl.testing's parameterized decorators,
those should be at the bottom of the stack.
Various methods in `testing_utils` to get models will auto-generate a model
of the currently active Keras model type. This allows unittests to confirm
the equivalence between different Keras models.
For example, consider the following unittest:
```python
class MyTests(testing_utils.KerasTestCase):
@testing_utils.run_with_all_model_types(
exclude_models = ['sequential'])
def test_foo(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
This test tries building a small mlp as both a functional model and as a
subclass model.
We can also annotate the whole class if we want this to apply to all tests in
the class:
```python
@testing_utils.run_with_all_model_types(exclude_models = ['sequential'])
class MyTests(testing_utils.KerasTestCase):
def test_foo(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics)
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
exclude_models: A collection of Keras model types to not run.
(May also be a single model type not wrapped in a collection).
Defaults to None.
Returns:
Returns a decorator that will run the decorated test method multiple times:
once for each desired Keras model type.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
model_types = ['functional', 'subclass', 'sequential']
params = [('_%s' % model, model) for model in model_types
if model not in nest.flatten(exclude_models)]
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, model_type, *args, **kwargs):
"""A run of a single test case w/ the specified model type."""
if model_type == 'functional':
_test_functional_model_type(f, self, *args, **kwargs)
elif model_type == 'subclass':
_test_subclass_model_type(f, self, *args, **kwargs)
elif model_type == 'sequential':
_test_sequential_model_type(f, self, *args, **kwargs)
else:
raise ValueError('Unknown model type: %s' % (model_type,))
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _test_functional_model_type(f, test_or_class, *args, **kwargs):
with testing_utils.model_type_scope('functional'):
f(test_or_class, *args, **kwargs)
def _test_subclass_model_type(f, test_or_class, *args, **kwargs):
with testing_utils.model_type_scope('subclass'):
f(test_or_class, *args, **kwargs)
def _test_sequential_model_type(f, test_or_class, *args, **kwargs):
with testing_utils.model_type_scope('sequential'):
f(test_or_class, *args, **kwargs)
def run_all_keras_modes(
test_or_class=None,
config=None,
always_skip_v1=False):
"""Execute the decorated test with all keras execution modes.
This decorator is intended to be applied either to individual test methods in
a `keras_parameterized.TestCase` class, or directly to a test class that
extends it. Doing so will cause the contents of the individual test
method (or all test methods in the class) to be executed multiple times -
once executing in legacy graph mode, once running eagerly and with
`should_run_eagerly` returning True, and once running eagerly with
`should_run_eagerly` returning False.
If Tensorflow v2 behavior is enabled, legacy graph mode will be skipped, and
the test will only run twice.
Note: if stacking this decorator with absl.testing's parameterized decorators,
those should be at the bottom of the stack.
For example, consider the following unittest:
```python
class MyTests(testing_utils.KerasTestCase):
@testing_utils.run_all_keras_modes
def test_foo(self):
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
if __name__ == "__main__":
tf.test.main()
```
This test will try compiling & fitting the small functional mlp using all
three Keras execution modes.
Args:
test_or_class: test method or class to be annotated. If None,
this method returns a decorator that can be applied to a test method or
test class. If it is not None this returns the decorator applied to the
test or class.
config: An optional config_pb2.ConfigProto to use to configure the
session when executing graphs.
always_skip_v1: If True, does not try running the legacy graph mode even
when Tensorflow v2 behavior is not enabled.
Returns:
Returns a decorator that will run the decorated test method multiple times.
Raises:
ImportError: If abseil parameterized is not installed or not included as
a target dependency.
"""
params = [('_v2_eager', 'v2_eager'),
('_v2_function', 'v2_function')]
if not (always_skip_v1 or tf2.enabled()):
params.append(('_v1_graph', 'v1_graph'))
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
# Use named_parameters so it can be individually run from the command line
@parameterized.named_parameters(*params)
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
"""A run of a single test case w/ specified run mode."""
if run_mode == 'v1_graph':
_v1_graph_test(f, self, config, *args, **kwargs)
elif run_mode == 'v2_function':
_v2_graph_functions_test(f, self, *args, **kwargs)
elif run_mode == 'v2_eager':
_v2_eager_test(f, self, *args, **kwargs)
else:
        raise ValueError('Unknown run mode %s' % run_mode)
return decorated
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _v1_graph_test(f, test_or_class, config, *args, **kwargs):
with context.graph_mode(), testing_utils.run_eagerly_scope(False):
with test_or_class.test_session(use_gpu=True, config=config):
f(test_or_class, *args, **kwargs)
def _v2_graph_functions_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(False):
f(test_or_class, *args, **kwargs)
def _v2_eager_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(True):
f(test_or_class, *args, **kwargs)
def _test_or_class_decorator(test_or_class, single_method_decorator):
"""Decorate a test or class with a decorator intended for one method.
If the test_or_class is a class:
This will apply the decorator to all test methods in the class.
If the test_or_class is an iterable of already-parameterized test cases:
This will apply the decorator to all the cases, and then flatten the
resulting cross-product of test cases. This allows stacking the Keras
parameterized decorators with each other, and applying them to test methods
that have already been marked with an absl parameterized decorator.
Otherwise, treat the obj as a single method and apply the decorator directly.
Args:
test_or_class: A test method (that may have already been decorated with a
parameterized decorator) or a test class that extends
keras_parameterized.TestCase.
single_method_decorator:
A parameterized decorator intended for a single test method.
Returns:
The decorated result.
"""
def _decorate_test_or_class(obj):
if isinstance(obj, collections.Iterable):
return itertools.chain.from_iterable(
single_method_decorator(method) for method in obj)
if isinstance(obj, type):
cls = obj
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix):
setattr(cls, name, single_method_decorator(value))
cls = type(cls).__new__(type(cls), cls.__name__, cls.__bases__,
cls.__dict__.copy())
return cls
return single_method_decorator(obj)
if test_or_class is not None:
return _decorate_test_or_class(test_or_class)
return _decorate_test_or_class
|
apache-2.0
| -8,742,774,779,153,391,000 | -8,239,886,662,384,510,000 | 35.378788 | 80 | 0.689713 | false |
proversity-org/edx-platform
|
lms/djangoapps/class_dashboard/urls.py
|
18
|
1499
|
"""
Class Dashboard API endpoint urls.
"""
from django.conf import settings
from django.conf.urls import url
import class_dashboard.views
import class_dashboard.dashboard_data
COURSE_ID_PATTERN = settings.COURSE_ID_PATTERN
urlpatterns = [
# Json request data for metrics for entire course
    url(r'^{}/all_sequential_open_distrib$'.format(COURSE_ID_PATTERN),
        class_dashboard.views.all_sequential_open_distrib, name="all_sequential_open_distrib"),
    url(r'^{}/all_problem_grade_distribution$'.format(COURSE_ID_PATTERN),
        class_dashboard.views.all_problem_grade_distribution, name="all_problem_grade_distribution"),
    # Json request data for metrics for particular section
    url(r'^{}/problem_grade_distribution/(?P<section>\d+)$'.format(COURSE_ID_PATTERN),
        class_dashboard.views.section_problem_grade_distrib, name="section_problem_grade_distrib"),
# For listing students that opened a sub-section
url(r'^get_students_opened_subsection$',
class_dashboard.dashboard_data.get_students_opened_subsection, name="get_students_opened_subsection"),
# For listing of students' grade per problem
url(r'^get_students_problem_grades$',
class_dashboard.dashboard_data.get_students_problem_grades, name="get_students_problem_grades"),
# For generating metrics data as a csv
url(r'^post_metrics_data_csv_url',
class_dashboard.dashboard_data.post_metrics_data_csv, name="post_metrics_data_csv"),
]
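# Illustrative note (not in the original file): assuming COURSE_ID_PATTERN
# defines a 'course_id' named group, these routes can be reversed by name,
# e.g. reverse('all_problem_grade_distribution',
#              kwargs={'course_id': course_id})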
|
agpl-3.0
| -7,273,507,635,869,606,000 | -141,282,943,870,913,630 | 41.828571 | 110 | 0.73449 | false |
jezdez/kuma
|
vendor/packages/translate/storage/test_monolingual.py
|
22
|
1421
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# These test classes should be used as superclasses of test classes for
# classes that don't support the target property
from translate.storage import base, test_base
class TestMonolingualUnit(test_base.TestTranslationUnit):
UnitClass = base.TranslationUnit
def test_target(self):
pass
def test_rich_get(self):
pass
def test_rich_set(self):
pass
class TestMonolingualStore(test_base.TestTranslationStore):
StoreClass = base.TranslationStore
def test_translate(self):
pass
def test_markup(self):
pass
def test_nonascii(self):
pass
def check_equality(self, store1, store2):
"""Check that store1 and store2 are the same."""
assert len(store1.units) == len(store2.units)
for n, store1unit in enumerate(store1.units):
store2unit = store2.units[n]
if str(store1unit) != str(store2unit):
print("match failed between elements %d of %d" % (n+1, len(store1.units)))
print("store1:")
print(str(store1))
print("store2:")
print(str(store2))
print("store1.units[%d].__dict__:" % n, store1unit.__dict__)
print("store2.units[%d].__dict__:" % n, store2unit.__dict__)
assert str(store1unit) == str(store2unit)
|
mpl-2.0
| 3,143,544,378,069,948,400 | 845,072,356,388,717,600 | 26.862745 | 90 | 0.597467 | false |
ehfeng/pipet
|
pipet/api/views.py
|
2
|
1357
|
from flask import Blueprint, request
from pipet.sources import SCHEMANAME, Event, Group, Identity, Page
from pipet.sources.api.tasks import process_event, process_page
# NOTE: the views below reference an Organization model that this module never
# imports; its actual home (e.g. pipet.models) is not shown in this file.
blueprint = Blueprint(SCHEMANAME, __name__)
@blueprint.route('/identity', methods=['PUT'])
def identity():
organization = Organization.query.filter_by(name=request.authorization.username,
api_key=request.authorization.password).first()
return
@blueprint.route('/group', methods=['PUT'])
def group():
organization = Organization.query.filter_by(name=request.authorization.username,
api_key=request.authorization.password).first()
return
@blueprint.route('/event', methods=['POST'])
def event():
organization = Organization.query.filter_by(name=request.authorization.username,
api_key=request.authorization.password).first()
data = request.get_json()
    process_event.delay(organization.id, data)
    # A view must return a response; '', 202 (accepted) is an assumption, the
    # original returned nothing.
    return '', 202
@blueprint.route('/page', methods=['POST'])
def page():
organization = Organization.query.filter_by(name=request.authorization.username,
api_key=request.authorization.password).first()
data = request.get_json()
    # The original enqueued process_event here, but process_page is imported
    # for this route and is assumed to be the intended task.
    process_page.delay(organization.id, data)
    return '', 202  # minimal response; assumed, the original returned nothing
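# Illustrative request (hypothetical host, organization name, and API key),
# assuming the blueprint is registered at the application root:
#   curl -X POST http://localhost:5000/event \
#        -u my_org:my_api_key \
#        -H 'Content-Type: application/json' \
#        -d '{"event": "signed_up"}'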
|
mit
| -5,284,387,331,139,430,000 | 6,084,682,551,851,225,000 | 35.675676 | 95 | 0.644068 | false |
gtcasl/eiger
|
Eiger.py
|
1
|
20400
|
#!/usr/bin/python
#
# \file Eiger.py
# \author Eric Anger <[email protected]>
# \date July 6, 2012
#
# \brief Command line interface into Eiger modeling framework
#
# \changes Added more plot functionality; Benjamin Allan, SNL 5/2013
#
import argparse
import matplotlib.pyplot as plt
import numpy as np
import math
import tempfile
import shutil
import os
from ast import literal_eval
import json
import sys
from collections import namedtuple
from tabulate import tabulate
from sklearn.cluster import KMeans
from eiger import database, PCA, LinearRegression
Model = namedtuple('Model', ['metric_names', 'means', 'stdevs',
'rotation_matrix', 'kmeans', 'models'])
def import_model(args):
database.addModelFromFile(args.database, args.file, args.source_name, args.description)
def export_model(args):
database.dumpModelToFile(args.database, args.file, args.id)
def list_models(args):
all_models = database.getModels(args.database)
print tabulate(all_models, headers=['ID', 'Description', 'Created', 'Source'])
def trainModel(args):
print "Training the model..."
training_DC = database.DataCollection(args.training_dc, args.database)
try:
performance_metric_id = [m[0] for m in training_DC.metrics].index(args.target)
except ValueError:
print "Unable to find target metric '%s', " \
"please specify a valid one: " % (args.target,)
for (my_name,my_desc,my_type) in training_DC.metrics:
print "\t%s" % (my_name,)
return
training_performance = training_DC.profile[:,performance_metric_id]
metric_names = [m[0] for m in training_DC.metrics if m[0] != args.target]
    if args.predictor_metrics is not None:
metric_names = filter(lambda x: x in args.predictor_metrics, metric_names)
metric_ids = [[m[0] for m in training_DC.metrics].index(n) for n in metric_names]
if not metric_ids:
print "Unable to make model for empty data collection. Aborting..."
return
training_profile = training_DC.profile[:,metric_ids]
#pca
training_pca = PCA.PCA(training_profile)
nonzero_components = training_pca.nonzeroComponents()
rotation_matrix = training_pca.components[:,nonzero_components]
rotated_training_profile = np.dot(training_profile, rotation_matrix)
#kmeans
n_clusters = args.clusters
kmeans = KMeans(n_clusters)
means = np.mean(rotated_training_profile, axis=0)
stdevs = np.std(rotated_training_profile - means, axis=0, ddof=1)
stdevs[stdevs==0.0] = 1.0
clusters = kmeans.fit_predict((rotated_training_profile - means)/stdevs)
# reserve a vector for each model created per cluster
models = [0] * len(clusters)
print "Modeling..."
for i in range(n_clusters):
cluster_profile = rotated_training_profile[clusters==i,:]
cluster_performance = training_performance[clusters==i]
regression = LinearRegression.LinearRegression(cluster_profile,
cluster_performance)
pool = [LinearRegression.identityFunction()]
for col in range(cluster_profile.shape[1]):
if('inv_quadratic' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, -2))
if('inv_linear' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, -1))
if('inv_sqrt' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, -.5))
if('sqrt' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, .5))
if('linear' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, 1))
if('quadratic' in args.regressor_functions):
pool.append(LinearRegression.powerFunction(col, 2))
if('log' in args.regressor_functions):
pool.append(LinearRegression.logFunction(col))
if('cross' in args.regressor_functions):
for xcol in range(col, cluster_profile.shape[1]):
pool.append(LinearRegression.crossFunction(col, xcol))
if('div' in args.regressor_functions):
for xcol in range(col, cluster_profile.shape[1]):
pool.append(LinearRegression.divFunction(col,xcol))
pool.append(LinearRegression.divFunction(xcol,col))
(models[i], r_squared, r_squared_adj) = regression.select(pool,
threshold=args.threshold,
folds=args.nfolds)
print "Index\tMetric Name"
print '\n'.join("%s\t%s" % metric for metric in enumerate(metric_names))
print "PCA matrix:"
print rotation_matrix
print "Model:\n" + str(models[i])
print "Finished modeling cluster %s:" % (i,)
print "r squared = %s" % (r_squared,)
print "adjusted r squared = %s" % (r_squared_adj,)
model = Model(metric_names, means, stdevs, rotation_matrix, kmeans, models)
# if we want to save the model file, copy it now
    outfilename = training_DC.name + '.model' if args.output is None else args.output
    if args.json:
writeToFileJSON(model, outfilename)
else:
writeToFile(model, outfilename)
if args.test_fit:
args.experiment_dc = args.training_dc
args.model = outfilename
testModel(args)
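# Illustrative CLI invocation (hypothetical database and collection names):
#   python Eiger.py train eiger.db my_training_dc runtime \
#       --clusters 4 --test-fit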
def dumpCSV(args):
training_DC = database.DataCollection(args.training_dc, args.database)
names = [met[0] for met in training_DC.metrics]
    if args.metrics is not None:
names = args.metrics
header = ','.join(names)
idxs = training_DC.metricIndexByName(names)
profile = training_DC.profile[:,idxs]
    outfile = sys.stdout if args.output is None else args.output
np.savetxt(outfile, profile, delimiter=',',
header=header, comments='')
def testModel(args):
print "Testing the model fit..."
test_DC = database.DataCollection(args.experiment_dc, args.database)
model = readFile(args.model)
_runExperiment(model.kmeans, model.means, model.stdevs, model.models,
model.rotation_matrix, test_DC,
args, model.metric_names)
def readFile(infile):
with open(infile, 'r') as modelfile:
first_char = modelfile.readline()[0]
if first_char == '{':
return readJSONFile(infile)
else:
return readBespokeFile(infile)
def plotModel(args):
print "Plotting model..."
model = readFile(args.model)
    if args.plot_pcs_per_metric:
        PCA.PlotPCsPerMetric(model.rotation_matrix, model.metric_names,
                             title="PCs Per Metric")
    if args.plot_metrics_per_pc:
        PCA.PlotMetricsPerPC(model.rotation_matrix, model.metric_names,
                             title="Metrics Per PC")
def _stringToArray(string):
"""
Parse string of form [len](number,number,number,...) to a numpy array.
"""
length = string[:string.find('(')]
values = string[string.find('('):]
arr = np.array(literal_eval(values))
return np.reshape(arr, literal_eval(length))
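# Worked example of the serialized-array format parsed above, derived from the
# writeToFile encoding below:
#   _stringToArray("[2,2]((1.0,2.0),(3.0,4.0))")
#   -> array([[ 1., 2.],
#             [ 3., 4.]])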
def _runExperiment(kmeans, means, stdevs, models, rotation_matrix,
experiment_DC, args, metric_names):
unordered_metric_ids = experiment_DC.metricIndexByType('deterministic',
'nondeterministic')
unordered_metric_names = [experiment_DC.metrics[mid][0] for mid in unordered_metric_ids]
# make sure all metric_names are in experiment_DC.metrics[:][0]
have_metrics = [x in unordered_metric_names for x in metric_names]
if not all(have_metrics):
print("Experiment DC does not have matching metrics. Aborting...")
return
# set the correct ordering
expr_metric_ids = [unordered_metric_ids[unordered_metric_names.index(name)]
for name in metric_names]
for idx,metric in enumerate(experiment_DC.metrics):
if(metric[0] == args.target):
performance_metric_id = idx
performance = experiment_DC.profile[:,performance_metric_id]
profile = experiment_DC.profile[:,expr_metric_ids]
rotated_profile = np.dot(profile, rotation_matrix)
means = np.mean(rotated_profile, axis=0)
stdevs = np.std(rotated_profile - means, axis=0, ddof=1)
stdevs = np.nan_to_num(stdevs)
stdevs[stdevs==0.0] = 1.0
clusters = kmeans.predict((rotated_profile - means)/stdevs)
prediction = np.empty_like(performance)
for i in range(len(kmeans.cluster_centers_)):
prediction[clusters==i] = abs(models[i].poll(rotated_profile[clusters==i]))
if args.show_prediction:
print "Actual\t\tPredicted"
print '\n'.join("%s\t%s" % x for x in zip(performance,prediction))
mse = sum([(a-p)**2 for a,p in
zip(performance, prediction)]) / len(performance)
rmse = math.sqrt(mse)
mape = 100 * sum([abs((a-p)/a) for a,p in
zip(performance,prediction)]) / len(performance)
print "Number of experiment trials: %s" % len(performance)
print "Mean Average Percent Error: %s" % mape
print "Mean Squared Error: %s" % mse
print "Root Mean Squared Error: %s" % rmse
def writeToFileJSON(model, outfile):
# Let's assume model has all the attributes we care about
json_root = {}
json_root["metric_names"] = [name for name in model.metric_names]
json_root["means"] = [mean for mean in model.means.tolist()]
json_root["std_devs"] = [stdev for stdev in model.stdevs.tolist()]
json_root["rotation_matrix"] = [[elem for elem in row] for row in model.rotation_matrix.tolist()]
json_root["clusters"] = []
for i in range(len(model.kmeans.cluster_centers_)):
json_cluster = {}
json_cluster["center"] = [center for center in model.kmeans.cluster_centers_[i].tolist()]
# get models in json format
json_cluster["regressors"] = model.models[i].toJSONObject()
json_root["clusters"].append(json_cluster)
with open(outfile, 'w') as out:
json.dump(json_root, out, indent=4)
def readJSONFile(infile):
with open(infile, 'r') as modelfile:
json_root = json.load(modelfile)
metric_names = json_root['metric_names']
means = np.array(json_root['means'])
stdevs = np.array(json_root['std_devs'])
rotation_matrix = np.array(json_root['rotation_matrix'])
empty_kmeans = KMeans(n_clusters=len(json_root['clusters']), n_init=1)
centers = []
models = []
for cluster in json_root['clusters']:
centers.append(np.array(cluster['center']))
models.append(LinearRegression.Model.fromJSONObject(cluster['regressors']))
kmeans = empty_kmeans.fit(centers)
return Model(metric_names, means, stdevs, rotation_matrix, kmeans, models)
def writeToFile(model, outfile):
with open(outfile, 'w') as modelfile:
# For printing the original model file encoding
modelfile.write("%s\n%s\n" % (len(model.metric_names), '\n'.join(model.metric_names)))
modelfile.write("[%s](%s)\n" %
(len(model.means), ','.join([str(mean) for mean in model.means.tolist()])))
modelfile.write("[%s](%s)\n" %
(len(model.stdevs), ','.join([str(stdev) for stdev in model.stdevs.tolist()])))
modelfile.write("[%s,%s]" % model.rotation_matrix.shape)
modelfile.write("(%s)\n" %
','.join(["(%s)" %
','.join([str(elem) for elem in row])
for row in model.rotation_matrix.tolist()]))
for i in range(len(model.kmeans.cluster_centers_)):
modelfile.write('Model %s\n' % i)
modelfile.write("[%s](%s)\n" % (model.rotation_matrix.shape[1],
','.join([str(center) for center in
model.kmeans.cluster_centers_[i].tolist()])))
modelfile.write(repr(model.models[i]))
modelfile.write('\n') # need a trailing newline
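# Informational sketch (not in the original file) of the bespoke model layout
# written by writeToFile and read back by readBespokeFile below:
#   <number of metric names>
#   <one metric name per line>
#   [n](mean,...) # means
#   [n](stdev,...) # standard deviations
#   [rows,cols]((row),(row),...) # PCA rotation matrix
#   Model <i> # then, per cluster:
#   [n](center,...) # cluster centroid
#   <repr of the cluster's regression model>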
def readBespokeFile(infile):
"""Returns a Model namedtuple with all the model parts"""
with open(infile, 'r') as modelfile:
lines = iter(modelfile.read().splitlines())
n_params = int(lines.next())
metric_names = [lines.next() for i in range(n_params)]
means = _stringToArray(lines.next())
stdevs = _stringToArray(lines.next())
rotation_matrix = _stringToArray(lines.next())
models = []
centroids = []
try:
while True:
name = lines.next() # kill a line
centroids.append(_stringToArray(lines.next()))
weights = _stringToArray(lines.next())
functions = [LinearRegression.stringToFunction(lines.next())
for i in range(weights.shape[0])]
models.append(LinearRegression.Model(functions, weights))
except StopIteration:
pass
kmeans = KMeans(len(centroids))
kmeans.cluster_centers_ = np.array(centroids)
return Model(metric_names, means, stdevs, rotation_matrix, kmeans, models)
def convert(args):
print "Converting model..."
with open(args.input, 'r') as modelfile:
first_char = modelfile.readline()[0]
if first_char == '{':
model = readJSONFile(args.input)
writeToFile(model, args.output)
else:
model = readBespokeFile(args.input)
writeToFileJSON(model, args.output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = \
'Command line interface into Eiger performance modeling framework \
for all model generation, polling, and serialization tasks.',
argument_default=None,
fromfile_prefix_chars='@')
subparsers = parser.add_subparsers(title='subcommands')
train_parser = subparsers.add_parser('train',
help='train a model with data from the database',
description='Train a model with data from the database')
train_parser.set_defaults(func=trainModel)
dump_parser = subparsers.add_parser('dump',
help='dump data collection to CSV',
description='Dump data collection as CSV')
dump_parser.set_defaults(func=dumpCSV)
test_parser = subparsers.add_parser('test',
help='test how well a model predicts a data collection',
description='Test how well a model predicts a data collection')
test_parser.set_defaults(func=testModel)
plot_parser = subparsers.add_parser('plot',
help='plot the behavior of a model',
description='Plot the behavior of a model')
plot_parser.set_defaults(func=plotModel)
convert_parser = subparsers.add_parser('convert',
help='transform a model into a different file format',
description='Transform a model into a different file format')
convert_parser.set_defaults(func=convert)
list_model_parser = subparsers.add_parser('list',
help='list available models in the Eiger DB',
description='List available models in the Eiger DB')
list_model_parser.set_defaults(func=list_models)
import_model_parser = subparsers.add_parser('import',
help='import model file into the Eiger DB',
description='Import model file into the Eiger DB')
import_model_parser.set_defaults(func=import_model)
export_model_parser = subparsers.add_parser('export',
help='export model from Eiger DB to file',
description='Export model from Eiger DB to file')
export_model_parser.set_defaults(func=export_model)
"""TRAINING ARGUMENTS"""
train_parser.add_argument('database', type=str, help='Name of the database file')
train_parser.add_argument('training_dc', type=str,
help='Name of the training data collection')
train_parser.add_argument('target', type=str,
help='Name of the target metric to predict')
train_parser.add_argument('--test-fit', action='store_true', default=False,
help='If set will test the model fit against the training data.')
train_parser.add_argument('--show-prediction', action='store_true',
default=False,
help='If set, send the actual and predicted values to stdout.')
train_parser.add_argument('--predictor-metrics', nargs='*',
help='Only use these metrics when building a model.')
train_parser.add_argument('--output', type=str,
help='Filename to output file to, otherwise use "<training_dc>.model"')
train_parser.add_argument('--clusters', '-k', type=int, default=1,
help='Number of clusters for kmeans')
train_parser.add_argument('--threshold', type=float,
help='Cutoff threshold of increase in adjusted R-squared value when'
' adding new predictors to the model')
train_parser.add_argument('--nfolds', type=int,
help='Number of folds to use in k-fold cross validation.')
train_parser.add_argument('--regressor-functions', nargs='*',
default=['inv_quadratic', 'inv_linear', 'inv_sqrt', 'sqrt',
'linear', 'quadratic', 'log', 'cross', 'div'],
help='Regressor functions to use. Options are linear, quadratic, '
'sqrt, inv_linear, inv_quadratic, inv_sqrt, log, cross, and div. '
'Defaults to all.')
train_parser.add_argument('--json', action='store_true', default=False,
help='Output model in JSON format, rather than bespoke')
"""DUMP CSV ARGUMENTS"""
dump_parser.add_argument('database', type=str, help='Name of the database file')
dump_parser.add_argument('training_dc', type=str,
help='Name of the data collection to dump')
dump_parser.add_argument('--metrics', nargs='*',
help='Only dump these metrics.')
dump_parser.add_argument('--output', type=str, help='Name of file to dump CSV to')
"""TEST ARGUMENTS"""
test_parser.add_argument('database', type=str, help='Name of the database file')
test_parser.add_argument('experiment_dc', type=str,
help='Name of the data collection to experiment on')
test_parser.add_argument('model', type=str,
help='Name of the model to use')
test_parser.add_argument('target', type=str,
help='Name of the target metric to predict')
test_parser.add_argument('--show-prediction', action='store_true',
default=False,
help='If set, send the actual and predicted values to stdout.')
"""PLOT ARGUMENTS"""
plot_parser.add_argument('model', type=str,
help='Name of the model to use')
plot_parser.add_argument('--plot-pcs-per-metric', action='store_true',
default=False,
help='If set, plots the breakdown of principal components per metric.')
plot_parser.add_argument('--plot-metrics-per-pc',
action='store_true',
default=False,
help='If set, plots the breakdown of metrics per principal component.')
"""CONVERT ARGUMENTS"""
convert_parser.add_argument('input', type=str,
help='Name of input model to convert from')
convert_parser.add_argument('output', type=str,
help='Name of output model to convert to')
"""LIST ARGUMENTS"""
list_model_parser.add_argument('database', type=str, help='Name of the database file')
"""IMPORT ARGUMENTS"""
import_model_parser.add_argument('database', type=str,
help='Name of the database file')
import_model_parser.add_argument('file', type=str,
help='Name of the model file to import')
import_model_parser.add_argument('source_name', type=str,
help='Name of the source of the model (ie Eiger)')
import_model_parser.add_argument('--description', type=str,
default='',
help='String to describe the model')
"""EXPORT ARGUMENTS"""
export_model_parser.add_argument('database', type=str,
help='Name of the database file')
export_model_parser.add_argument('id', type=int,
help='ID number identifying which model in the database to export ')
export_model_parser.add_argument('file', type=str,
help='Name of the file to export into')
args = parser.parse_args()
args.func(args)
print "Done."
|
bsd-3-clause
| -2,451,031,706,648,389,000 | -4,023,557,494,113,057,000 | 44.033113 | 101 | 0.634265 | false |
akhilari7/pa-dude
|
lib/python2.7/site-packages/pymongo/common.py
|
9
|
18768
|
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import collections
import warnings
from bson.binary import (STANDARD, PYTHON_LEGACY,
JAVA_LEGACY, CSHARP_LEGACY)
from bson.codec_options import CodecOptions
from bson.py3compat import string_type, integer_types, iteritems
from bson.raw_bson import RawBSONDocument
from pymongo.auth import MECHANISMS
from pymongo.errors import ConfigurationError
from pymongo.monitoring import _validate_event_listeners
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import (read_pref_mode_from_name,
_ServerMode)
from pymongo.ssl_support import validate_cert_reqs
from pymongo.write_concern import WriteConcern
# Defaults until we connect to a server and get updated limits.
MAX_BSON_SIZE = 16 * (1024 ** 2)
MAX_MESSAGE_SIZE = 2 * MAX_BSON_SIZE
MIN_WIRE_VERSION = 0
MAX_WIRE_VERSION = 0
MAX_WRITE_BATCH_SIZE = 1000
# What this version of PyMongo supports.
MIN_SUPPORTED_WIRE_VERSION = 0
MAX_SUPPORTED_WIRE_VERSION = 3
# Frequency to call ismaster on servers, in seconds.
HEARTBEAT_FREQUENCY = 10
# Frequency to process kill-cursors, in seconds. See MongoClient.close_cursor.
KILL_CURSOR_FREQUENCY = 1
# How long to wait, in seconds, for a suitable server to be found before
# aborting an operation. For example, if the client attempts an insert
# during a replica set election, SERVER_SELECTION_TIMEOUT governs the
# longest it is willing to wait for a new primary to be found.
SERVER_SELECTION_TIMEOUT = 30
# Spec requires at least 500ms between ismaster calls.
MIN_HEARTBEAT_INTERVAL = 0.5
# Default connectTimeout in seconds.
CONNECT_TIMEOUT = 20.0
# Default value for maxPoolSize.
MAX_POOL_SIZE = 100
# Default value for localThresholdMS.
LOCAL_THRESHOLD_MS = 15
# mongod/s 2.6 and above return code 59 when a
# command doesn't exist. mongod versions previous
# to 2.6 and mongos 2.4.x return no error code
# when a command does exist. mongos versions previous
# to 2.4.0 return code 13390 when a command does not
# exist.
COMMAND_NOT_FOUND_CODES = (59, 13390, None)
# Error codes to ignore if GridFS calls createIndex on a secondary
UNAUTHORIZED_CODES = (13, 16547, 16548)
def partition_node(node):
"""Split a host:port string into (host, int(port)) pair."""
host = node
port = 27017
idx = node.rfind(':')
if idx != -1:
host, port = node[:idx], int(node[idx + 1:])
if host.startswith('['):
host = host[1:-1]
return host, port
def clean_node(node):
"""Split and normalize a node name from an ismaster response."""
host, port = partition_node(node)
# Normalize hostname to lowercase, since DNS is case-insensitive:
# http://tools.ietf.org/html/rfc4343
# This prevents useless rediscovery if "foo.com" is in the seed list but
# "FOO.com" is in the ismaster response.
return host.lower(), port
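# For example:
# partition_node('[::1]:27018') -> ('::1', 27018)
# clean_node('FOO.example.com') -> ('foo.example.com', 27017)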
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
# Mapping of URI uuid representation options to valid subtypes.
_UUID_REPRESENTATIONS = {
'standard': STANDARD,
'pythonLegacy': PYTHON_LEGACY,
'javaLegacy': JAVA_LEGACY,
'csharpLegacy': CSHARP_LEGACY
}
def validate_boolean(option, value):
"""Validates that 'value' is True or False."""
if isinstance(value, bool):
return value
raise TypeError("%s must be True or False" % (option,))
def validate_boolean_or_string(option, value):
"""Validates that value is True, False, 'true', or 'false'."""
if isinstance(value, string_type):
if value not in ('true', 'false'):
raise ValueError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
return validate_boolean(option, value)
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, integer_types):
return value
elif isinstance(value, string_type):
if not value.isdigit():
raise ValueError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer, which does not include 0.
"""
val = validate_integer(option, value)
if val <= 0:
raise ValueError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_non_negative_integer(option, value):
"""Validate that 'value' is a positive integer or 0.
"""
val = validate_integer(option, value)
if val < 0:
raise ValueError("The value of %s must be "
"a non negative integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
if value is None:
return value
    # First make sure it's a string: in Python 3.3, open(True, 'r') succeeds,
    # so the type must be validated explicitly. Used in ssl cert checking due
    # to poor ssl module error reporting.
value = validate_string(option, value)
open(value, 'r').close()
return value
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_non_negative_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or 0 or None.
"""
if value is None:
return value
return validate_non_negative_integer(option, value)
def validate_string(option, value):
"""Validates that 'value' is an instance of `basestring` for Python 2
or `str` for Python 3.
"""
if isinstance(value, string_type):
return value
raise TypeError("Wrong type for %s, value must be "
"an instance of %s" % (option, string_type.__name__))
def validate_string_or_none(option, value):
"""Validates that 'value' is an instance of `basestring` or `None`.
"""
if value is None:
return value
return validate_string(option, value)
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, integer_types):
return value
elif isinstance(value, string_type):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
errmsg = "%s must be an integer or float" % (option,)
try:
value = float(value)
except ValueError:
raise ValueError(errmsg)
except TypeError:
raise TypeError(errmsg)
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise ValueError("%s must be greater than 0 and "
"less than one billion" % (option,))
return value
def validate_positive_float_or_zero(option, value):
"""Validates that 'value' is 0 or a positive float, or can be converted to
0 or a positive float.
"""
if value == 0 or value == "0":
return 0
return validate_positive_float(option, value)
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
def validate_timeout_or_zero(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds for the case where None is an error
and 0 is valid. Setting the timeout to nothing in the URI string is a
config error.
"""
if value is None:
raise ConfigurationError("%s cannot be None" % (option, ))
if value == 0 or value == "0":
return 0
return validate_positive_float(option, value) / 1000.0
def validate_read_preference(dummy, value):
"""Validate a read preference.
"""
if not isinstance(value, _ServerMode):
raise TypeError("%r is not a read preference." % (value,))
return value
def validate_read_preference_mode(dummy, name):
"""Validate read preference mode for a MongoReplicaSetClient.
"""
try:
return read_pref_mode_from_name(name)
except ValueError:
raise ValueError("%s is not a valid read preference" % (name,))
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
# CRAM-MD5 is for server testing only. Undocumented,
# unsupported, may be removed at any time. You have
# been warned.
if value not in MECHANISMS and value != 'CRAM-MD5':
raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS)))
return value
def validate_uuid_representation(dummy, value):
"""Validate the uuid representation option selected in the URI.
"""
try:
return _UUID_REPRESENTATIONS[value]
except KeyError:
raise ValueError("%s is an invalid UUID representation. "
"Must be one of "
"%s" % (value, tuple(_UUID_REPRESENTATIONS)))
def validate_read_preference_tags(name, value):
"""Parse readPreferenceTags if passed as a client kwarg.
"""
if not isinstance(value, list):
value = [value]
tag_sets = []
for tag_set in value:
if tag_set == '':
tag_sets.append({})
continue
try:
tag_sets.append(dict([tag.split(":")
for tag in tag_set.split(",")]))
except Exception:
raise ValueError("%r not a valid "
"value for %s" % (tag_set, name))
return tag_sets
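# For example:
# validate_read_preference_tags('readPreferenceTags', 'dc:ny,rack:1')
# -> [{'dc': 'ny', 'rack': '1'}]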
_MECHANISM_PROPS = frozenset(['SERVICE_NAME'])
def validate_auth_mechanism_properties(option, value):
"""Validate authMechanismProperties."""
value = validate_string(option, value)
props = {}
for opt in value.split(','):
try:
key, val = opt.split(':')
except ValueError:
raise ValueError("auth mechanism properties must be "
"key:value pairs like SERVICE_NAME:"
"mongodb, not %s." % (opt,))
if key not in _MECHANISM_PROPS:
raise ValueError("%s is not a supported auth "
"mechanism property. Must be one of "
"%s." % (key, tuple(_MECHANISM_PROPS)))
props[key] = val
return props
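# For example:
# validate_auth_mechanism_properties('authMechanismProperties',
#                                    'SERVICE_NAME:mongodb')
# -> {'SERVICE_NAME': 'mongodb'}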
def validate_document_class(option, value):
"""Validate the document_class option."""
if not issubclass(value, (collections.MutableMapping, RawBSONDocument)):
raise TypeError("%s must be dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or a "
"sublass of collections.MutableMapping" % (option,))
return value
def validate_is_mapping(option, value):
"""Validate the type of method arguments that expect a document."""
if not isinstance(value, collections.Mapping):
raise TypeError("%s must be an instance of dict, bson.son.SON, or "
"other type that inherits from "
"collections.Mapping" % (option,))
def validate_is_document_type(option, value):
"""Validate the type of method arguments that expect a MongoDB document."""
if not isinstance(value, (collections.MutableMapping, RawBSONDocument)):
raise TypeError("%s must be an instance of dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or "
"a type that inherits from "
"collections.MutableMapping" % (option,))
def validate_ok_for_replace(replacement):
"""Validate a replacement document."""
validate_is_mapping("replacement", replacement)
# Replacement can be {}
if replacement and not isinstance(replacement, RawBSONDocument):
first = next(iter(replacement))
if first.startswith('$'):
raise ValueError('replacement can not include $ operators')
def validate_ok_for_update(update):
"""Validate an update document."""
validate_is_mapping("update", update)
# Update can not be {}
if not update:
raise ValueError('update only works with $ operators')
first = next(iter(update))
if not first.startswith('$'):
raise ValueError('update only works with $ operators')
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout,
VALIDATORS = {
'replicaset': validate_string_or_none,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean_or_string,
'j': validate_boolean_or_string,
'journal': validate_boolean_or_string,
'connecttimeoutms': validate_timeout_or_none,
'maxpoolsize': validate_positive_integer_or_none,
'socketkeepalive': validate_boolean_or_string,
'sockettimeoutms': validate_timeout_or_none,
'waitqueuetimeoutms': validate_timeout_or_none,
'waitqueuemultiple': validate_non_negative_integer_or_none,
'ssl': validate_boolean_or_string,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'ssl_match_hostname': validate_boolean_or_string,
'readconcernlevel': validate_string_or_none,
'read_preference': validate_read_preference,
'readpreference': validate_read_preference_mode,
'readpreferencetags': validate_read_preference_tags,
'localthresholdms': validate_positive_float_or_zero,
'serverselectiontimeoutms': validate_timeout_or_zero,
'authmechanism': validate_auth_mechanism,
'authsource': validate_string,
'authmechanismproperties': validate_auth_mechanism_properties,
'document_class': validate_document_class,
'tz_aware': validate_boolean_or_string,
'uuidrepresentation': validate_uuid_representation,
'connect': validate_boolean,
'event_listeners': _validate_event_listeners
}
_AUTH_OPTIONS = frozenset(['authmechanismproperties'])
def validate_auth_option(option, value):
"""Validate optional authentication parameters.
"""
lower, value = validate(option, value)
if lower not in _AUTH_OPTIONS:
raise ConfigurationError('Unknown '
'authentication option: %s' % (option,))
return lower, value
def validate(option, value):
"""Generic validation function.
"""
lower = option.lower()
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(option, value)
return lower, value
def get_validated_options(options):
"""Validate each entry in options and raise a warning if it is not valid.
Returns a copy of options with invalid entries removed
"""
validated_options = {}
for opt, value in iteritems(options):
lower = opt.lower()
try:
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(opt, value)
except (ValueError, ConfigurationError) as exc:
warnings.warn(str(exc))
else:
validated_options[lower] = value
return validated_options
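# For example (the unknown option is warned about and dropped):
# get_validated_options({'maxPoolSize': 50, 'bogusOption': 1})
# -> {'maxpoolsize': 50}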
WRITE_CONCERN_OPTIONS = frozenset([
'w',
'wtimeout',
'wtimeoutms',
'fsync',
'j',
'journal'
])
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO MONGODB.
"""
def __init__(self, codec_options, read_preference, write_concern,
read_concern):
if not isinstance(codec_options, CodecOptions):
raise TypeError("codec_options must be an instance of "
"bson.codec_options.CodecOptions")
self.__codec_options = codec_options
if not isinstance(read_preference, _ServerMode):
raise TypeError("%r is not valid for read_preference. See "
"pymongo.read_preferences for valid "
"options." % (read_preference,))
self.__read_preference = read_preference
if not isinstance(write_concern, WriteConcern):
raise TypeError("write_concern must be an instance of "
"pymongo.write_concern.WriteConcern")
self.__write_concern = write_concern
if not isinstance(read_concern, ReadConcern):
raise TypeError("read_concern must be an instance of "
"pymongo.read_concern.ReadConcern")
self.__read_concern = read_concern
@property
def codec_options(self):
"""Read only access to the :class:`~bson.codec_options.CodecOptions`
of this instance.
"""
return self.__codec_options
@property
def write_concern(self):
"""Read only access to the :class:`~pymongo.write_concern.WriteConcern`
of this instance.
.. versionchanged:: 3.0
The :attr:`write_concern` attribute is now read only.
"""
return self.__write_concern
@property
def read_preference(self):
"""Read only access to the read preference of this instance.
.. versionchanged:: 3.0
The :attr:`read_preference` attribute is now read only.
"""
return self.__read_preference
@property
def read_concern(self):
"""Read only access to the read concern of this instance.
.. versionadded:: 3.2
"""
return self.__read_concern
|
mit
| -6,486,844,085,245,087,000 | -7,956,806,132,193,121,000 | 32.57424 | 79 | 0.645514 | false |
vesellov/bitdust.devel
|
customer/data_sender.py
|
1
|
14665
|
#!/usr/bin/python
# data_sender.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (data_sender.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
"""
.. module:: data_sender.
.. raw:: html
<a href="https://bitdust.io/automats/data_sender/data_sender.png" target="_blank">
<img src="https://bitdust.io/automats/data_sender/data_sender.png" style="max-width:100%;">
</a>
A state machine to manage data sending process, acts very simple:
1) when new local data is created it tries to send it to the correct supplier
2) wait while ``p2p.io_throttle`` is doing some data transmission to remote suppliers
3) calls ``p2p.backup_matrix.ScanBlocksToSend()`` to get a list of pieces needs to be send
4) this machine is restarted every minute to check if some more data needs to be send
5) also can be restarted at any time when it is needed
EVENTS:
* :red:`block-acked`
* :red:`block-failed`
* :red:`init`
* :red:`new-data`
* :red:`restart`
* :red:`scan-done`
* :red:`timer-1min`
* :red:`timer-1sec`
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
from io import open
#------------------------------------------------------------------------------
_Debug = True
_DebugLevel = 12
#------------------------------------------------------------------------------
import os
import time
#------------------------------------------------------------------------------
from logs import lg
from automats import automat
from automats import global_state
from lib import misc
from lib import packetid
from contacts import contactsdb
from userid import my_id
from main import settings
from p2p import contact_status
from . import io_throttle
#------------------------------------------------------------------------------
_DataSender = None
_ShutdownFlag = False
#------------------------------------------------------------------------------
def A(event=None, arg=None):
"""
Access method to interact with the state machine.
"""
global _DataSender
if _DataSender is None:
_DataSender = DataSender(
name='data_sender',
state='READY',
debug_level=_DebugLevel,
log_events=_Debug,
log_transitions=_Debug,
)
if event is not None:
_DataSender.automat(event, arg)
return _DataSender
def Destroy():
"""
Destroy the state machine and remove the instance from memory.
"""
global _DataSender
if _DataSender is None:
return
_DataSender.destroy()
del _DataSender
_DataSender = None
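# Illustrative usage (not part of the original module): other components drive
# the machine through the module-level access method, e.g.
#   from customer import data_sender
#   data_sender.A('init')
#   data_sender.A('new-data') # schedule a re-scan of blocks to send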
class DataSender(automat.Automat):
"""
A class to manage process of sending data packets to remote suppliers.
"""
    timers = {
        'timer-1min': (60, ['READY']),
        'timer-1sec': (1.0, ['SENDING']),
    }
statistic = {}
def state_changed(self, oldstate, newstate, event, arg):
global_state.set_global_state('DATASEND ' + newstate)
def A(self, event, arg):
#---READY---
if self.state == 'READY':
if event == 'new-data' or event == 'timer-1min' or event == 'restart':
self.state = 'SCAN_BLOCKS'
self.doScanAndQueue(arg)
elif event == 'init':
pass
#---SCAN_BLOCKS---
elif self.state == 'SCAN_BLOCKS':
if event == 'scan-done' and self.isQueueEmpty(arg):
self.state = 'READY'
self.doRemoveUnusedFiles(arg)
elif event == 'scan-done' and not self.isQueueEmpty(arg):
self.state = 'SENDING'
#---SENDING---
elif self.state == 'SENDING':
if event == 'restart' or ( ( event == 'timer-1sec' or event == 'block-acked' or event == 'block-failed' or event == 'new-data' ) and self.isQueueEmpty(arg) ):
self.state = 'SCAN_BLOCKS'
self.doScanAndQueue(arg)
return None
def isQueueEmpty(self, arg):
if not arg:
return io_throttle.IsSendingQueueEmpty()
remoteID, _ = arg
return io_throttle.OkToSend(remoteID)
def doScanAndQueue(self, arg):
global _ShutdownFlag
if _Debug:
lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag)
if _Debug:
log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w')
log.write(u'doScanAndQueue %s\n' % time.asctime()) # .decode('utf-8')
if _ShutdownFlag:
if _Debug:
log.write(u'doScanAndQueue _ShutdownFlag is True\n')
self.automat('scan-done')
if _Debug:
log.flush()
log.close()
return
for customer_idurl in contactsdb.known_customers():
if '' not in contactsdb.suppliers(customer_idurl):
from storage import backup_matrix
for backupID in misc.sorted_backup_ids(
list(backup_matrix.local_files().keys()), True):
this_customer_idurl = packetid.CustomerIDURL(backupID)
if this_customer_idurl != customer_idurl:
continue
packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID)
if _Debug:
log.write(u'%s\n' % packetsBySupplier)
for supplierNum in packetsBySupplier.keys():
supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
if not supplier_idurl:
lg.warn('unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%s' % (
supplierNum, backupID, customer_idurl))
continue
for packetID in packetsBySupplier[supplierNum]:
backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID)
if backupID_ != backupID:
lg.warn('unexpected backupID supplierNum=%s for %s, customer_idurl=%s' % (
packetID, backupID, customer_idurl))
continue
if supplierNum_ != supplierNum:
lg.warn('unexpected supplierNum %s for %s, customer_idurl=%s' % (
packetID, backupID, customer_idurl))
continue
if io_throttle.HasPacketInSendQueue(
supplier_idurl, packetID):
if _Debug:
log.write(u'%s already in sending queue for %s\n' % (packetID, supplier_idurl))
continue
if not io_throttle.OkToSend(supplier_idurl):
if _Debug:
log.write(u'skip, not ok to send %s\n' % supplier_idurl)
continue
# tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, [])
# if len(tranByID) > 3:
# log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID)))
# continue
customerGlobalID, pathID = packetid.SplitPacketID(packetID)
filename = os.path.join(
settings.getLocalBackupsDir(),
customerGlobalID,
pathID,
)
if not os.path.isfile(filename):
if _Debug:
log.write(u'%s is not a file\n' % filename)
continue
if io_throttle.QueueSendFile(
filename,
packetID,
supplier_idurl,
my_id.getLocalID(),
self._packetAcked,
self._packetFailed,
):
if _Debug:
log.write(u'io_throttle.QueueSendFile %s\n' % packetID)
else:
if _Debug:
log.write(u'io_throttle.QueueSendFile FAILED %s\n' % packetID)
# lg.out(6, ' %s for %s' % (packetID, backupID))
# DEBUG
# break
self.automat('scan-done')
if _Debug:
log.flush()
log.close()
# def doPrintStats(self, arg):
# """
# """
# if lg.is_debug(18):
# transfers = transport_control.current_transfers()
# bytes_stats = transport_control.current_bytes_transferred()
# s = ''
# for info in transfers:
# s += '%s ' % (diskspace.MakeStringFromBytes(bytes_stats[info.transfer_id]).replace(' ', '').replace('bytes', 'b'))
# lg.out(0, 'transfers: ' + s[:120])
def doRemoveUnusedFiles(self, arg):
# we want to remove files for this block
# because we only need them during rebuilding
if settings.getBackupsKeepLocalCopies() is True:
# if user set this in settings - he want to keep the local files
return
# ... user do not want to keep local backups
if settings.getGeneralWaitSuppliers() is True:
from customer import fire_hire
# but he want to be sure - all suppliers are green for a long time
if len(contact_status.listOfflineSuppliers()) > 0 or time.time(
) - fire_hire.GetLastFireTime() < 24 * 60 * 60:
# some people are not there or we do not have stable team yet
# do not remove the files because we need it to rebuild
return
count = 0
from storage import backup_matrix
from storage import restore_monitor
from storage import backup_rebuilder
if _Debug:
lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
for backupID in misc.sorted_backup_ids(
list(backup_matrix.local_files().keys())):
if restore_monitor.IsWorking(backupID):
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because restoring' %
backupID)
continue
if backup_rebuilder.IsBackupNeedsWork(backupID):
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because needs rebuilding' %
backupID)
continue
if not backup_rebuilder.ReadStoppedFlag():
if backup_rebuilder.A().currentBackupID is not None:
if backup_rebuilder.A().currentBackupID == backupID:
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because rebuilding is in process' %
backupID)
continue
packets = backup_matrix.ScanBlocksToRemove(
backupID, settings.getGeneralWaitSuppliers())
for packetID in packets:
customer, pathID = packetid.SplitPacketID(packetID)
filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID)
if os.path.isfile(filename):
try:
os.remove(filename)
# lg.out(6, ' ' + os.path.basename(filename))
except:
lg.exc()
continue
count += 1
if _Debug:
lg.out(_DebugLevel, ' %d files were removed' % count)
backup_matrix.ReadLocalFiles()
def _packetAcked(self, packet, ownerID, packetID):
from storage import backup_matrix
backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(packetID)
backup_matrix.RemoteFileReport(
backupID, blockNum, supplierNum, dataORparity, True)
if ownerID not in self.statistic:
self.statistic[ownerID] = [0, 0]
self.statistic[ownerID][0] += 1
self.automat('block-acked', (ownerID, packetID))
def _packetFailed(self, remoteID, packetID, why):
from storage import backup_matrix
backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(
packetID)
backup_matrix.RemoteFileReport(
backupID, blockNum, supplierNum, dataORparity, False)
if remoteID not in self.statistic:
self.statistic[remoteID] = [0, 0]
self.statistic[remoteID][1] += 1
self.automat('block-failed', (remoteID, packetID))
def statistic():
"""
The ``data_sender()`` keeps track of sending results with every supplier.
This is used by ``fire_hire()`` to decide how reliable is given
supplier.
"""
global _DataSender
if _DataSender is None:
return {}
return _DataSender.statistic
def SetShutdownFlag():
"""
Set flag to indicate that no need to send anything anymore.
"""
global _ShutdownFlag
_ShutdownFlag = True
|
agpl-3.0
| 8,260,489,049,742,042,000 | 7,544,386,103,824,975,000 | 38.422043 | 170 | 0.512104 | false |
ondra-novak/chromium.src
|
build/android/pylib/host_driven/setup.py
|
48
|
6473
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for instrumentation host-driven tests."""
import logging
import os
import sys
import types
from pylib.host_driven import test_case
from pylib.host_driven import test_info_collection
from pylib.host_driven import test_runner
def _GetPythonFiles(root, files):
"""Returns all files from |files| that end in 'Test.py'.
Args:
root: A directory name with python files.
files: A list of file names.
Returns:
A list with all python files that match the testing naming scheme.
"""
return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
def _InferImportNameFromFile(python_file):
"""Given a file, infer the import name for that file.
Example: /usr/foo/bar/baz.py -> baz.
Args:
python_file: Path to the Python file, ostensibly to import later.
Returns:
The module name for the given file.
"""
return os.path.splitext(os.path.basename(python_file))[0]
def _GetTestModules(host_driven_test_root, is_official_build):
"""Retrieve a list of python modules that match the testing naming scheme.
Walks the location of host-driven tests, imports them, and provides the list
of imported modules to the caller.
Args:
host_driven_test_root: The path to walk, looking for the
pythonDrivenTests or host_driven_tests directory
is_official_build: Whether to run only those tests marked 'official'
Returns:
A list of python modules under |host_driven_test_root| which match the
testing naming scheme. Each module should define one or more classes that
derive from HostDrivenTestCase.
"""
# By default run all host-driven tests under pythonDrivenTests or
# host_driven_tests.
host_driven_test_file_list = []
for root, _, files in os.walk(host_driven_test_root):
if (root.endswith('host_driven_tests') or
root.endswith('pythonDrivenTests') or
(is_official_build and (root.endswith('pythonDrivenTests/official') or
root.endswith('host_driven_tests/official')))):
host_driven_test_file_list += _GetPythonFiles(root, files)
host_driven_test_file_list.sort()
test_module_list = [_GetModuleFromFile(test_file)
for test_file in host_driven_test_file_list]
return test_module_list
def _GetModuleFromFile(python_file):
"""Gets the python module associated with a file by importing it.
Args:
python_file: File to import.
Returns:
The module object.
"""
sys.path.append(os.path.dirname(python_file))
import_name = _InferImportNameFromFile(python_file)
return __import__(import_name)
def _GetTestsFromClass(test_case_class, **kwargs):
"""Returns one test object for each test method in |test_case_class|.
Test methods are methods on the class which begin with 'test'.
Args:
test_case_class: Class derived from HostDrivenTestCase which contains zero
or more test methods.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
A list of test case objects, each initialized for a particular test method.
"""
test_names = [m for m in dir(test_case_class)
if _IsTestMethod(m, test_case_class)]
return [test_case_class(name, **kwargs) for name in test_names]
def _GetTestsFromModule(test_module, **kwargs):
"""Gets a list of test objects from |test_module|.
Args:
test_module: Module from which to get the set of test methods.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
A list of test case objects each initialized for a particular test method
defined in |test_module|.
"""
tests = []
for name in dir(test_module):
attr = getattr(test_module, name)
if _IsTestCaseClass(attr):
tests.extend(_GetTestsFromClass(attr, **kwargs))
return tests
def _IsTestCaseClass(test_class):
return (type(test_class) is types.TypeType and
issubclass(test_class, test_case.HostDrivenTestCase) and
test_class is not test_case.HostDrivenTestCase)
def _IsTestMethod(attrname, test_case_class):
"""Checks whether this is a valid test method.
Args:
attrname: The method name.
test_case_class: The test case class.
Returns:
True if test_case_class.'attrname' is callable and it starts with 'test';
False otherwise.
"""
attr = getattr(test_case_class, attrname)
return callable(attr) and attrname.startswith('test')
def _GetAllTests(test_root, is_official_build, **kwargs):
"""Retrieve a list of host-driven tests defined under |test_root|.
Args:
test_root: Path which contains host-driven test files.
is_official_build: Whether this is an official build.
kwargs: Keyword args to pass into the constructor of test cases.
Returns:
List of test case objects, one for each available test method.
"""
if not test_root:
return []
all_tests = []
test_module_list = _GetTestModules(test_root, is_official_build)
for module in test_module_list:
all_tests.extend(_GetTestsFromModule(module, **kwargs))
return all_tests
def InstrumentationSetup(host_driven_test_root, official_build,
instrumentation_options):
"""Creates a list of host-driven instrumentation tests and a runner factory.
Args:
host_driven_test_root: Directory where the host-driven tests are.
official_build: True if this is an official build.
instrumentation_options: An InstrumentationOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
test_collection = test_info_collection.TestInfoCollection()
all_tests = _GetAllTests(
host_driven_test_root, official_build,
instrumentation_options=instrumentation_options)
test_collection.AddTests(all_tests)
available_tests = test_collection.GetAvailableTests(
instrumentation_options.annotations,
instrumentation_options.exclude_annotations,
instrumentation_options.test_filter)
logging.debug('All available tests: ' + str(
[t.tagged_name for t in available_tests]))
def TestRunnerFactory(device, shard_index):
return test_runner.HostDrivenTestRunner(
device, shard_index,
instrumentation_options.tool,
instrumentation_options.push_deps,
instrumentation_options.cleanup_test_files)
return (TestRunnerFactory, available_tests)
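# Illustrative call (hypothetical test root and options object):
#   runner_factory, tests = InstrumentationSetup(
#       'build/android/host_driven_tests',
#       official_build=False,
#       instrumentation_options=options)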
|
bsd-3-clause
| 180,784,480,205,407,970 | 9,149,830,119,318,316,000 | 31.044554 | 79 | 0.712035 | false |
agacek/camkes-tool
|
camkes/internal/version.py
|
1
|
1813
|
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''Versioning functionality. This computes a version identifier based on the
current source code state. It was decided this was more reliable while the tool
is under active development. Note that any extraneous files in your source
directory that match the version filters will be accumulated in the version
computation.'''
from memoization import memoized
import hashlib, os, re
@memoized
def version():
# Files to consider relevant. Each entry should be a pair of (path, filter)
# where 'path' is relative to the directory of this file and 'filter' is a
# regex describing which filenames to match under the given path.
SOURCES = [
('../', r'^.*\.py$'), # Python sources
('../templates', r'.*'), # Templates
]
my_path = os.path.dirname(os.path.abspath(__file__))
sources = set()
# Accumulate all relevant source files.
for s in SOURCES:
path = os.path.join(my_path, s[0])
regex = re.compile(s[1])
for root, _, files in os.walk(path):
for f in files:
if regex.match(f):
sources.add(os.path.abspath(os.path.join(root, f)))
# Hash each file and hash a concatenation of these hashes. Note, hashing a
# hash is not good practice for cryptography, but it's fine for this
# purpose.
hfinal = hashlib.sha1() #pylint: disable=E1101
for s in sources:
with open(s, 'r') as f:
h = hashlib.sha1(f.read()).hexdigest() #pylint: disable=E1101
hfinal.update('%s|' % h) #pylint: disable=E1101
return hfinal.hexdigest()
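# Illustrative standalone sketch (added for clarity; not part of the original
# module): the same hash-of-per-file-hashes technique, with the root directory
# and filename filter below chosen purely as assumptions for demonstration.
# Sorting the filenames makes the resulting identifier order-independent.
if __name__ == '__main__':
    demo = hashlib.sha1()
    demo_root = os.path.dirname(os.path.abspath(__file__))  # assumed root
    for root, _, files in os.walk(demo_root):
        for name in sorted(f for f in files if re.match(r'^.*\.py$', f)):
            with open(os.path.join(root, name), 'rb') as fobj:
                demo.update(hashlib.sha1(fobj.read()).hexdigest().encode())
    print(demo.hexdigest())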
|
bsd-2-clause
| -4,817,587,354,060,018,000 | -2,412,940,287,223,092,000 | 35.26 | 79 | 0.656922 | false |
greatmazinger/or-tools
|
examples/python/set_covering.py
|
34
|
2650
|
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set covering in Google CP Solver.
Placing of firestations, from Winston 'Operations Research', page 486.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/set_covering.mzn
* ECLiPSe : http://www.hakank.org/eclipse/set_covering.ecl
* Comet : http://www.hakank.org/comet/set_covering.co
* Gecode : http://www.hakank.org/gecode/set_covering.cpp
* SICStus : http://www.hakank.org/sicstus/set_covering.pl
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main(unused_argv):
# Create the solver.
solver = pywrapcp.Solver("Set covering")
#
# data
#
min_distance = 15
num_cities = 6
distance = [
[0, 10, 20, 30, 30, 20],
[10, 0, 25, 35, 20, 10],
[20, 25, 0, 15, 30, 20],
[30, 35, 15, 0, 15, 25],
[30, 20, 30, 15, 0, 14],
[20, 10, 20, 25, 14, 0]
]
#
# declare variables
#
x = [solver.IntVar(0, 1, "x[%i]" % i) for i in range(num_cities)]
#
# constraints
#
# objective to minimize
z = solver.Sum(x)
# ensure that all cities are covered
for i in range(num_cities):
b = [x[j] for j in range(num_cities) if distance[i][j] <= min_distance]
solver.Add(solver.SumGreaterOrEqual(b, 1))
objective = solver.Minimize(z, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.AddObjective(z)
collector = solver.LastSolutionCollector(solution)
solver.Solve(solver.Phase(x + [z],
solver.INT_VAR_DEFAULT,
solver.INT_VALUE_DEFAULT),
[collector, objective])
print "z:", collector.ObjectiveValue(0)
print "x:", [collector.Value(0, x[i]) for i in range(num_cities)]
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
if __name__ == "__main__":
main("cp sample")
|
apache-2.0
| 5,832,000,809,426,198,000 | 3,959,492,235,693,046,000 | 25.767677 | 75 | 0.657358 | false |
ForgottenKahz/CloudOPC
|
venv/Lib/site-packages/sqlalchemy/testing/suite/test_update_delete.py
|
203
|
1582
|
from .. import fixtures, config
from ..assertions import eq_
from sqlalchemy import Integer, String
from ..schema import Table, Column
class SimpleUpdateDeleteTest(fixtures.TablesTest):
run_deletes = 'each'
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('plain_pk', metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50))
)
@classmethod
def insert_data(cls):
config.db.execute(
cls.tables.plain_pk.insert(),
[
{"id": 1, "data": "d1"},
{"id": 2, "data": "d2"},
{"id": 3, "data": "d3"},
]
)
def test_update(self):
t = self.tables.plain_pk
r = config.db.execute(
t.update().where(t.c.id == 2),
data="d2_new"
)
assert not r.is_insert
assert not r.returns_rows
eq_(
config.db.execute(t.select().order_by(t.c.id)).fetchall(),
[
(1, "d1"),
(2, "d2_new"),
(3, "d3")
]
)
def test_delete(self):
t = self.tables.plain_pk
r = config.db.execute(
t.delete().where(t.c.id == 2)
)
assert not r.is_insert
assert not r.returns_rows
eq_(
config.db.execute(t.select().order_by(t.c.id)).fetchall(),
[
(1, "d1"),
(3, "d3")
]
)
__all__ = ('SimpleUpdateDeleteTest', )
|
mit
| 1,667,919,490,888,217,900 | -2,048,356,143,979,541,500 | 24.111111 | 70 | 0.457016 | false |
ibrica/universe-server
|
play.py
|
1
|
1073
|
from multiprocessing import Process
import time
import gym
import universe
from universe.spaces.vnc_event import keycode
from envs import create_env
def start_game(model, env_name):
"""regular Python process, not using torch"""
p = Process(target=play_game, args=(model,env_name))
p.start()
# Don't wait with join, respond to user request
def play_game(model, env_name):
"""Play game with saved model if ther's no model play random"""
env = create_env(env_name, client_id="play1",remotes=1) # Local docker container
max_game_length = 10000
state = env.reset()
reward_sum = 0
start_time = time.time()
    for step in range(max_game_length):
        # No saved model for now; keep pressing up, 60 key events per minute
        state, reward, done, _ = env.step(['up' for i in range(60)])
        reward_sum += reward
        print("Time {}, game reward {}, game length {}".format(
            time.strftime("%Hh %Mm %Ss"),
            reward_sum,
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start_time))))
        if done:
            break
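# Illustrative usage (added for clarity; not part of the original module);
# the environment id below is an assumption -- any Universe VNC env id works:
if __name__ == '__main__':
    start_game(model=None, env_name='flashgames.DuskDrive-v0')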
|
mit
| 7,100,002,884,382,921,000 | 8,699,588,078,601,701,000 | 33.645161 | 130 | 0.630941 | false |
m038/superdesk-content-api
|
content_api/errors.py
|
6
|
1588
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""
A module that contains exception types for the Superdesk public API.
"""
from superdesk.errors import SuperdeskApiError
class PublicApiError(SuperdeskApiError):
"""Base class for all Superdesk public API errors."""
_codes = {
10000: "Unknown API error.",
}
"""A mapping of error codes to error messages."""
def __init__(self, error_code=10000, desc=None):
message = self._codes.get(error_code, 'Unknown error')
super().__init__(status_code=error_code, message=message, payload=desc)
class UnexpectedParameterError(PublicApiError):
"""Used when request contains an unexpected parameter."""
PublicApiError._codes[10001] = "Unexpected parameter."
def __init__(self, desc=None):
super().__init__(10001, desc=desc)
class BadParameterValueError(PublicApiError):
"""Used when request contains a parameter with an invalid value."""
PublicApiError._codes[10002] = "Bad parameter value."
def __init__(self, desc=None):
super().__init__(10002, desc=desc)
class FileNotFoundError(PublicApiError):
"""Used when trying to fetch a missing file."""
PublicApiError._codes[10003] = "File not found."
def __init__(self, desc=None):
super().__init__(10003, desc=desc)
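# Illustrative usage (added for clarity; not part of the original module);
# catching by the shared base class is an assumed pattern:
#
#   try:
#       raise BadParameterValueError(desc='max_results must be positive')
#   except PublicApiError as e:
#       ...  # e carries code 10002 and the "Bad parameter value." message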
|
agpl-3.0
| -817,440,969,741,808,300 | 614,852,826,289,465,700 | 27.357143 | 79 | 0.68136 | false |
vinodkc/spark
|
python/pyspark/ml/tests/test_training_summary.py
|
15
|
25466
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.ml.classification import BinaryLogisticRegressionSummary, \
BinaryRandomForestClassificationSummary, FMClassifier, \
FMClassificationSummary, LinearSVC, LinearSVCSummary, \
LogisticRegression, LogisticRegressionSummary, \
MultilayerPerceptronClassifier, MultilayerPerceptronClassificationSummary, \
RandomForestClassificationSummary, RandomForestClassifier
from pyspark.ml.clustering import BisectingKMeans, GaussianMixture, KMeans
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression
from pyspark.sql import DataFrame
from pyspark.testing.mlutils import SparkSessionTestCase
class TrainingSummaryTest(SparkSessionTestCase):
def test_linear_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight",
fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.totalIterations, 0)
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertAlmostEqual(s.explainedVariance, 0.25, 2)
self.assertAlmostEqual(s.meanAbsoluteError, 0.0)
self.assertAlmostEqual(s.meanSquaredError, 0.0)
self.assertAlmostEqual(s.rootMeanSquaredError, 0.0)
self.assertAlmostEqual(s.r2, 1.0, 2)
self.assertAlmostEqual(s.r2adj, 1.0, 2)
self.assertTrue(isinstance(s.residuals, DataFrame))
self.assertEqual(s.numInstances, 2)
self.assertEqual(s.degreesOfFreedom, 1)
devResiduals = s.devianceResiduals
self.assertTrue(isinstance(devResiduals, list) and isinstance(devResiduals[0], float))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class LinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.explainedVariance, s.explainedVariance)
def test_glr_summary(self):
from pyspark.ml.linalg import Vectors
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
glr = GeneralizedLinearRegression(family="gaussian", link="identity", weightCol="weight",
fitIntercept=False)
model = glr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertEqual(s.numIterations, 1) # this should default to a single iteration of WLS
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.numInstances, 2)
self.assertTrue(isinstance(s.residuals(), DataFrame))
self.assertTrue(isinstance(s.residuals("pearson"), DataFrame))
coefStdErr = s.coefficientStandardErrors
self.assertTrue(isinstance(coefStdErr, list) and isinstance(coefStdErr[0], float))
tValues = s.tValues
self.assertTrue(isinstance(tValues, list) and isinstance(tValues[0], float))
pValues = s.pValues
self.assertTrue(isinstance(pValues, list) and isinstance(pValues[0], float))
self.assertEqual(s.degreesOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedom, 1)
self.assertEqual(s.residualDegreeOfFreedomNull, 2)
self.assertEqual(s.rank, 1)
self.assertTrue(isinstance(s.solver, str))
self.assertTrue(isinstance(s.aic, float))
self.assertTrue(isinstance(s.deviance, float))
self.assertTrue(isinstance(s.nullDeviance, float))
self.assertTrue(isinstance(s.dispersion, float))
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned
# The child class GeneralizedLinearRegressionTrainingSummary runs full test
sameSummary = model.evaluate(df)
self.assertAlmostEqual(sameSummary.deviance, s.deviance)
def test_binary_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, BinaryLogisticRegressionSummary))
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_multiclass_logistic_regression_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], [])),
(2.0, 2.0, Vectors.dense(2.0)),
(2.0, 2.0, Vectors.dense(1.9))],
["label", "weight", "features"])
lr = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
model = lr.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertAlmostEqual(s.accuracy, 0.75, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2)
self.assertAlmostEqual(s.weightedRecall, 0.75, 2)
self.assertAlmostEqual(s.weightedPrecision, 0.583, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 0.65, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.65, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, LogisticRegressionSummary))
self.assertFalse(isinstance(sameSummary, BinaryLogisticRegressionSummary))
self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
def test_linear_svc_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0, 1.0, 1.0)),
(0.0, 2.0, Vectors.dense(1.0, 2.0, 3.0))],
["label", "weight", "features"])
svc = LinearSVC(maxIter=5, weightCol="weight")
model = svc.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary()
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.scoreCol, "rawPrediction")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, LinearSVCSummary))
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_binary_randomforest_classification_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], []))],
["label", "weight", "features"])
rf = RandomForestClassifier(weightCol="weight")
model = rf.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, BinaryRandomForestClassificationSummary))
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_multiclass_randomforest_classification_summary(self):
df = self.spark.createDataFrame([(1.0, 2.0, Vectors.dense(1.0)),
(0.0, 2.0, Vectors.sparse(1, [], [])),
(2.0, 2.0, Vectors.dense(2.0)),
(2.0, 2.0, Vectors.dense(1.9))],
["label", "weight", "features"])
rf = RandomForestClassifier(weightCol="weight")
model = rf.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.predictionCol, "prediction")
self.assertEqual(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, RandomForestClassificationSummary))
self.assertFalse(isinstance(sameSummary, BinaryRandomForestClassificationSummary))
self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
def test_fm_classification_summary(self):
df = self.spark.createDataFrame([(1.0, Vectors.dense(2.0)),
(0.0, Vectors.dense(2.0)),
(0.0, Vectors.dense(6.0)),
(1.0, Vectors.dense(3.0))
],
["label", "features"])
fm = FMClassifier(maxIter=5)
model = fm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary()
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.scoreCol, "probability")
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.predictionCol, "prediction")
objHist = s.objectiveHistory
self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertTrue(isinstance(s.roc, DataFrame))
self.assertAlmostEqual(s.areaUnderROC, 0.625, 2)
self.assertTrue(isinstance(s.pr, DataFrame))
self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
self.assertAlmostEqual(s.weightedTruePositiveRate, 0.75, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.25, 2)
self.assertAlmostEqual(s.weightedRecall, 0.75, 2)
self.assertAlmostEqual(s.weightedPrecision, 0.8333333333333333, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 0.7333333333333334, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 0.7333333333333334, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, FMClassificationSummary))
self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_mlp_classification_summary(self):
df = self.spark.createDataFrame([(0.0, Vectors.dense([0.0, 0.0])),
(1.0, Vectors.dense([0.0, 1.0])),
(1.0, Vectors.dense([1.0, 0.0])),
(0.0, Vectors.dense([1.0, 1.0]))
],
["label", "features"])
mlp = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
model = mlp.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary()
# test that api is callable and returns expected types
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.labelCol, "label")
self.assertEqual(s.predictionCol, "prediction")
self.assertGreater(s.totalIterations, 0)
self.assertTrue(isinstance(s.labels, list))
self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
self.assertTrue(isinstance(s.precisionByLabel, list))
self.assertTrue(isinstance(s.recallByLabel, list))
self.assertTrue(isinstance(s.fMeasureByLabel(), list))
self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
self.assertAlmostEqual(s.accuracy, 1.0, 2)
self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
# test evaluation (with training dataset) produces a summary with same values
# one check is enough to verify a summary is returned, Scala version runs full test
sameSummary = model.evaluate(df)
self.assertTrue(isinstance(sameSummary, MultilayerPerceptronClassificationSummary))
self.assertAlmostEqual(sameSummary.accuracy, s.accuracy)
def test_gaussian_mixture_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
gmm = GaussianMixture(k=2)
model = gmm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.probabilityCol, "probability")
self.assertTrue(isinstance(s.probability, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
self.assertEqual(s.numIter, 3)
def test_bisecting_kmeans_summary(self):
data = [(Vectors.dense(1.0),), (Vectors.dense(5.0),), (Vectors.dense(10.0),),
(Vectors.sparse(1, [], []),)]
df = self.spark.createDataFrame(data, ["features"])
bkm = BisectingKMeans(k=2)
model = bkm.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
self.assertEqual(s.numIter, 20)
def test_kmeans_summary(self):
data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
(Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
df = self.spark.createDataFrame(data, ["features"])
kmeans = KMeans(k=2, seed=1)
model = kmeans.fit(df)
self.assertTrue(model.hasSummary)
s = model.summary
self.assertTrue(isinstance(s.predictions, DataFrame))
self.assertEqual(s.featuresCol, "features")
self.assertEqual(s.predictionCol, "prediction")
self.assertTrue(isinstance(s.cluster, DataFrame))
self.assertEqual(len(s.clusterSizes), 2)
self.assertEqual(s.k, 2)
self.assertEqual(s.numIter, 1)
if __name__ == "__main__":
from pyspark.ml.tests.test_training_summary import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
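# Illustrative pattern (added for clarity; not part of the original tests):
# the summary-access flow the tests above exercise, in minimal form (the
# DataFrame df and an active SparkSession are assumptions):
#
#   model = LogisticRegression(maxIter=5).fit(df)
#   if model.hasSummary:
#       print(model.summary.accuracy)       # training summary
#   print(model.evaluate(df).accuracy)      # summary on any dataset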
|
apache-2.0
| 5,581,587,913,794,918,000 | 3,199,348,652,473,336,300 | 55.340708 | 97 | 0.661588 | false |
chrishas35/django-travis-ci
|
tests/regressiontests/handlers/tests.py
|
34
|
1232
|
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.test import RequestFactory
from django.utils import unittest
class HandlerTests(unittest.TestCase):
def test_lock_safety(self):
"""
Tests for bug #11193 (errors inside middleware shouldn't leave
the initLock locked).
"""
# Mangle settings so the handler will fail
old_middleware_classes = settings.MIDDLEWARE_CLASSES
settings.MIDDLEWARE_CLASSES = 42
# Try running the handler, it will fail in load_middleware
handler = WSGIHandler()
self.assertEqual(handler.initLock.locked(), False)
try:
handler(None, None)
except:
pass
self.assertEqual(handler.initLock.locked(), False)
# Reset settings
settings.MIDDLEWARE_CLASSES = old_middleware_classes
def test_bad_path_info(self):
"""Tests for bug #15672 ('request' referenced before assignment)"""
environ = RequestFactory().get('/').environ
environ['PATH_INFO'] = '\xed'
handler = WSGIHandler()
response = handler(environ, lambda *a, **k: None)
self.assertEqual(response.status_code, 400)
|
bsd-3-clause
| -1,066,509,367,532,649,500 | -2,469,351,623,614,900,000 | 35.235294 | 75 | 0.650974 | false |
conwin/node-gyp
|
gyp/test/mac/gyptest-app.py
|
85
|
1409
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='app-bundle')
test.build('test.gyp', test.ALL, chdir='app-bundle')
# Binary
test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
chdir='app-bundle')
test.must_exist(info_plist)
test.must_contain(info_plist, 'com.google.Test App Gyp') # Variable expansion
# Resources
test.built_file_must_exist(
'Test App Gyp.app/Contents/Resources/English.lproj/InfoPlist.strings',
chdir='app-bundle')
test.built_file_must_exist(
'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
chdir='app-bundle')
# Packaging
test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
chdir='app-bundle')
test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
chdir='app-bundle')
test.pass_test()
|
mit
| 2,956,555,700,744,493,000 | -3,605,370,343,301,790,000 | 28.978723 | 80 | 0.643719 | false |
sandeepdsouza93/TensorFlow-15712
|
tensorflow/examples/learn/hdf5_classification.py
|
17
|
2201
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
import h5py # pylint: disable=g-bad-import-order
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
| -5,930,522,487,322,949,000 | -2,211,440,557,567,374,600 | 35.081967 | 78 | 0.712403 | false |
droundy/deft
|
talks/colloquium/figs/plot-walls.py
|
1
|
3242
|
#!/usr/bin/python
# We need the following two lines in order for matplotlib to work
# without access to an X server.
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import pylab, numpy, sys
xmax = 2.5
xmin = -0.4
def plotit(dftdata, mcdata):
dft_len = len(dftdata[:,0])
dft_dr = dftdata[2,0] - dftdata[1,0]
mcdata = numpy.insert(mcdata,0,0,0)
mcdata[0,0]=-10
mcoffset = 10/2
offset = -3/2
n0 = dftdata[:,6]
nA = dftdata[:,8]
nAmc = mcdata[:,11]
n0mc = mcdata[:,10]
pylab.figure(figsize=(6, 6))
pylab.subplots_adjust(hspace=0.001)
n_plt = pylab.subplot(3,1,3)
n_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,1]*4*numpy.pi/3,"b-",label='$n$ Monte Carlo')
n_plt.plot(dftdata[:,0]/2+offset,dftdata[:,1]*4*numpy.pi/3,"b--",label='$n$ DFT')
n_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
n_plt.yaxis.set_major_locator(pylab.MaxNLocator(6,steps=[1,5,10],prune='upper'))
pylab.ylim(ymin=0)
pylab.xlim(xmin, xmax)
pylab.xlabel("$z/\sigma$")
pylab.ylabel("$n(\mathbf{r})$")
n_plt.axvline(x=0, color='k', linestyle=':')
n = len(mcdata[:,0])
#pylab.twinx()
dftr = dftdata[:,0]/2+offset
thiswork = dftdata[:,5]
gross = dftdata[:,7]
stop_here = int(dft_len - 1/dft_dr)
print stop_here
start_here = int(2.5/dft_dr)
off = 1
me = 40
A_plt = pylab.subplot(3,1,1)
A_plt.axvline(x=0, color='k', linestyle=':')
A_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,2+2*off]/nAmc,"r-",label="$g_\sigma^A$ Monte Carlo")
A_plt.plot(dftr[dftr>=0],thiswork[dftr>=0],"ro",markevery=me*.8,label="$g_\sigma^A$ this work")
A_plt.plot(dftr[dftr>=0],gross[dftr>=0],"rx",markevery=me,label="Gross",
markerfacecolor='none',markeredgecolor='red', markeredgewidth=1)
A_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
A_plt.yaxis.set_major_locator(pylab.MaxNLocator(integer=True,prune='upper'))
pylab.ylim(ymin=0)
pylab.ylabel("$g_\sigma^A$")
pylab.xlim(xmin, xmax)
n0mc[0]=1
mcdata[0,10]=1
S_plt = pylab.subplot(3,1,2)
S_plt.axvline(x=0, color='k', linestyle=':')
S_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,3+2*off]/n0mc,"g-",label="$g_\sigma^S$ Monte Carlo")
S_plt.plot(dftdata[:,0]/2+offset,dftdata[:,4],"gx",markevery=me/2,label="Yu and Wu")
S_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
#pylab.ylim(ymax=12)
S_plt.yaxis.set_major_locator(pylab.MaxNLocator(5,integer=True,prune='upper'))
pylab.xlim(xmin, xmax)
pylab.ylim(ymin=0)
pylab.ylabel("$g_\sigma^S$")
xticklabels = A_plt.get_xticklabels() + S_plt.get_xticklabels()
pylab.setp(xticklabels, visible=False)
mcdata10 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-196.dat')
dftdata10 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.10.dat')
mcdata40 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-817.dat')
dftdata40 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.40.dat')
plotit(dftdata10, mcdata10)
pylab.savefig('figs/walls-10.pdf', transparent=True)
plotit(dftdata40, mcdata40)
pylab.savefig('figs/walls-40.pdf', transparent=True)
|
gpl-2.0
| 5,312,503,659,283,540,000 | -886,336,956,664,563,100 | 33.489362 | 99 | 0.637569 | false |
iddqd1/django-cms
|
cms/test_utils/project/urls.py
|
47
|
1646
|
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.test_utils.project.sampleapp.forms import LoginForm, LoginForm2, LoginForm3
admin.autodiscover()
urlpatterns = [
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
url(r'^media/cms/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': get_cms_setting('MEDIA_ROOT'), 'show_indexes': True}),
url(r'^jsi18n/(?P<packages>\S+?)/$', 'django.views.i18n.javascript_catalog'),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += i18n_patterns('',
url(r'^sample/login_other/$', 'django.contrib.auth.views.login',
kwargs={'authentication_form': LoginForm2}),
url(r'^sample/login/$', 'django.contrib.auth.views.login',
kwargs={'authentication_form': LoginForm}),
url(r'^sample/login3/$', 'django.contrib.auth.views.login',
kwargs={'authentication_form': LoginForm3}),
url(r'^admin/', include(admin.site.urls)),
url(r'^example/$', 'cms.test_utils.project.placeholderapp.views.example_view'),
url(r'^plain_view/$', 'cms.test_utils.project.sampleapp.views.plain_view'),
url(r'^', include('cms.urls')),
)
if settings.DEBUG and is_installed('debug_toolbar'):
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
|
bsd-3-clause
| 3,509,086,433,595,693,000 | 4,172,646,344,932,716,500 | 38.190476 | 84 | 0.68955 | false |
michaelaye/vispy
|
examples/basics/scene/volume.py
|
4
|
5162
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# vispy: gallery 2
"""
Example volume rendering
Controls:
* 1 - toggle camera between first person (fly), regular 3D (turntable) and
arcball
* 2 - toggle between volume rendering methods
* 3 - toggle between stent-CT / brain-MRI image
* 4 - toggle between colormaps
* 0 - reset cameras
* [] - decrease/increase isosurface threshold
With fly camera:
* WASD or arrow keys - move around
* SPACE - brake
* FC - move up-down
* IJKL or mouse - look around
"""
from itertools import cycle
import numpy as np
from vispy import app, scene, io
from vispy.color import get_colormaps, BaseColormap
from vispy.visuals.transforms import STTransform
# Read volume
vol1 = np.load(io.load_data_file('volume/stent.npz'))['arr_0']
vol2 = np.load(io.load_data_file('brain/mri.npz'))['data']
vol2 = np.flipud(np.rollaxis(vol2, 1))
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
# Set whether we are emulating a 3D texture
emulate_texture = False
# Create the volume visuals, only one is visible
volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
volume1.transform = scene.STTransform(translate=(64, 64, 0))
volume2 = scene.visuals.Volume(vol2, parent=view.scene, threshold=0.2,
emulate_texture=emulate_texture)
volume2.visible = False
# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2 # Select turntable at first
# Create an XYZAxis visual
axis = scene.visuals.XYZAxis(parent=view)
s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
affine = s.as_matrix()
axis.transform = affine
# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
glsl_map = """
vec4 translucent_fire(float t) {
return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
}
"""
class TransGrays(BaseColormap):
glsl_map = """
vec4 translucent_grays(float t) {
return vec4(t, t, t, t*0.05);
}
"""
# Setup colormap iterators
opaque_cmaps = cycle(get_colormaps())
translucent_cmaps = cycle([TransFire(), TransGrays()])
opaque_cmap = next(opaque_cmaps)
translucent_cmap = next(translucent_cmaps)
# Implement axis connection with cam2
@canvas.events.mouse_move.connect
def on_mouse_move(event):
if event.button == 1 and event.is_dragging:
axis.transform.reset()
axis.transform.rotate(cam2.roll, (0, 0, 1))
axis.transform.rotate(cam2.elevation, (1, 0, 0))
axis.transform.rotate(cam2.azimuth, (0, 1, 0))
axis.transform.scale((50, 50, 0.001))
axis.transform.translate((50., 50.))
axis.update()
# Implement key presses
@canvas.events.key_press.connect
def on_key_press(event):
global opaque_cmap, translucent_cmap
if event.text == '1':
cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1}
view.camera = cam_toggle.get(view.camera, cam2)
print(view.camera.name + ' camera')
if view.camera is cam2:
axis.visible = True
else:
axis.visible = False
elif event.text == '2':
methods = ['mip', 'translucent', 'iso', 'additive']
method = methods[(methods.index(volume1.method) + 1) % 4]
print("Volume render method: %s" % method)
cmap = opaque_cmap if method in ['mip', 'iso'] else translucent_cmap
volume1.method = method
volume1.cmap = cmap
volume2.method = method
volume2.cmap = cmap
elif event.text == '3':
volume1.visible = not volume1.visible
volume2.visible = not volume1.visible
elif event.text == '4':
if volume1.method in ['mip', 'iso']:
cmap = opaque_cmap = next(opaque_cmaps)
else:
cmap = translucent_cmap = next(translucent_cmaps)
volume1.cmap = cmap
volume2.cmap = cmap
elif event.text == '0':
cam1.set_range()
cam3.set_range()
elif event.text != '' and event.text in '[]':
s = -0.025 if event.text == '[' else 0.025
volume1.threshold += s
volume2.threshold += s
th = volume1.threshold if volume1.visible else volume2.threshold
print("Isosurface threshold: %0.3f" % th)
# for testing performance
# @canvas.connect
# def on_draw(ev):
# canvas.update()
if __name__ == '__main__':
print(__doc__)
app.run()
|
bsd-3-clause
| 3,383,191,955,301,900,300 | 2,427,498,102,963,376,000 | 31.062112 | 79 | 0.62902 | false |
AprilBrother/esptool
|
esptool.py
|
1
|
28432
|
#!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import serial
import time
import argparse
import os
import subprocess
import tempfile
class ESPROM:
# These are the currently known commands supported by the ROM
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
# Maximum block sized for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
ESP_FLASH_BLOCK = 0x100
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
# Sflash stub: an assembly routine to read from spi flash and send to host
SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
"\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
"\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
"\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"
def __init__(self, port=0, baud=ESP_ROM_BAUD):
self._port = serial.Serial(port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
self._port.baudrate = baud
""" Read bytes from the serial port while performing SLIP unescaping """
def read(self, length=1):
b = ''
while len(b) < length:
c = self._port.read(1)
if c == '\xdb':
c = self._port.read(1)
if c == '\xdc':
b = b + '\xc0'
elif c == '\xdd':
b = b + '\xdb'
else:
raise FatalError('Invalid SLIP escape')
else:
b = b + c
return b
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = '\xc0' \
+ (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \
+ '\xc0'
self._port.write(buf)
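    # Example (illustrative, added comment): write('\x01\xc0\x02') puts
    # '\xc0' + '\x01\xdb\xdc\x02' + '\xc0' on the wire -- 0xC0 frames the
    # packet, so a literal 0xC0 in the payload is escaped as 0xDB 0xDC
    # (and a literal 0xDB as 0xDB 0xDD).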
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state=ESP_CHECKSUM_MAGIC):
for b in data:
state ^= ord(b)
return state
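    # Example (illustrative, added comment): checksum('\x01\x02\x03') returns
    # 0xef ^ 0x01 ^ 0x02 ^ 0x03 == 0xef, since 1 ^ 2 ^ 3 == 0.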
""" Send a request and read the response """
def command(self, op=None, data=None, chk=0):
if op:
pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
# tries to get a response until that response has the
# same operation as the request or a retries limit has
# exceeded. This is needed for some esp8266s that
# reply with more sync responses than expected.
retries = 100
while retries > 0:
(op_ret, val, body) = self.receive_response()
if op is None or op_ret == op:
return val, body # valid response received
retries = retries - 1
raise FatalError("Response doesn't match request")
""" Receive a response to a command """
def receive_response(self):
# Read header of response and parse
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet')
hdr = self.read(8)
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
if resp != 0x01:
            raise FatalError('Invalid response 0x%02x to command' % resp)
# The variable-length body
body = self.read(len_ret)
# Terminating byte
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet')
return op_ret, val, body
""" Perform a connection test """
def sync(self):
self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55')
for i in xrange(7):
self.command()
""" Try connecting repeatedly until successful, or giving up """
def connect(self):
print 'Connecting...'
for _ in xrange(4):
# worst-case latency timer should be 255ms (probably <20ms)
self._port.timeout = 0.3
for _ in xrange(4):
try:
self._port.flushInput()
self._port.flushOutput()
self.sync()
self._port.timeout = 5
return
except:
time.sleep(0.05)
raise FatalError('Failed to connect to ESP8266')
""" Read memory address in target """
def read_reg(self, addr):
res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
if res[1] != "\0\0":
raise FatalError('Failed to read target memory')
return res[0]
""" Write to memory address in target """
def write_reg(self, addr, value, mask, delay_us=0):
if self.command(ESPROM.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
raise FatalError('Failed to write target memory')
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.command(ESPROM.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
raise FatalError('Failed to enter RAM download mode')
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
if self.command(ESPROM.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
ESPROM.checksum(data))[1] != "\0\0":
raise FatalError('Failed to write to target RAM')
""" Leave download mode and run the application """
def mem_finish(self, entrypoint=0):
if self.command(ESPROM.ESP_MEM_END,
struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0":
raise FatalError('Failed to leave RAM download mode')
""" Start downloading to Flash (performs an erase) """
def flash_begin(self, size, offset):
old_tmo = self._port.timeout
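        # The sector arithmetic below adjusts the erase size handed to the
        # ROM, working around a known miscalculation in the ROM's erase
        # routine for larger / unaligned regions (added comment; intent
        # inferred from the code, not from original documentation).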
num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK
sectors_per_block = 16
sector_size = 4096
num_sectors = (size + sector_size - 1) / sector_size
start_sector = offset / sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
erase_size = (num_sectors + 1) / 2 * sector_size
else:
erase_size = (num_sectors - head_sectors) * sector_size
self._port.timeout = 10
result = self.command(ESPROM.ESP_FLASH_BEGIN,
struct.pack('<IIII', erase_size, num_blocks, ESPROM.ESP_FLASH_BLOCK, offset))[1]
if result != "\0\0":
raise FatalError.WithResult('Failed to enter Flash download mode (result "%s")', result)
self._port.timeout = old_tmo
""" Write block to flash """
def flash_block(self, data, seq):
result = self.command(ESPROM.ESP_FLASH_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, ESPROM.checksum(data))[1]
if result != "\0\0":
raise FatalError.WithResult('Failed to write to target Flash after seq %d (got result %%s)' % seq, result)
""" Leave flash mode and run/reboot """
def flash_finish(self, reboot=False):
pkt = struct.pack('<I', int(not reboot))
if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0":
raise FatalError('Failed to leave Flash mode')
""" Run application code in flash """
def run(self, reboot=False):
# Fake flash begin immediately followed by flash end
self.flash_begin(0, 0)
self.flash_finish(reboot)
""" Read MAC from OTP ROM """
def read_mac(self):
mac0 = self.read_reg(self.ESP_OTP_MAC0)
mac1 = self.read_reg(self.ESP_OTP_MAC1)
if ((mac1 >> 16) & 0xff) == 0:
oui = (0x18, 0xfe, 0x34)
elif ((mac1 >> 16) & 0xff) == 1:
oui = (0xac, 0xd0, 0x74)
else:
raise FatalError("Unknown OUI")
return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
""" Read SPI flash manufacturer and device id """
def flash_id(self):
self.flash_begin(0, 0)
self.write_reg(0x60000240, 0x0, 0xffffffff)
self.write_reg(0x60000200, 0x10000000, 0xffffffff)
flash_id = self.read_reg(0x60000240)
self.flash_finish(False)
return flash_id
""" Read SPI flash """
def flash_read(self, offset, size, count=1):
# Create a custom stub
stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# Download stub
self.mem_begin(len(stub), 1, len(stub), 0x40100000)
self.mem_block(stub, 0)
self.mem_finish(0x4010001c)
# Fetch the data
data = ''
for _ in xrange(count):
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet (sflash read)')
data += self.read(size)
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet (sflash read)')
return data
""" Abuse the loader protocol to force flash to be left in write mode """
def flash_unlock_dio(self):
# Enable flash write mode
self.flash_begin(0, 0)
# Reset the chip rather than call flash_finish(), which would have
# write protected the chip again (why oh why does it do that?!)
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40000080)
""" Perform a chip erase of SPI flash """
def flash_erase(self):
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# This is hacky: we don't have a custom stub, instead we trick
# the bootloader to jump to the SPIEraseChip() routine and then halt/crash
# when it tries to boot an unconfigured system.
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40004984)
# Yup - there's no good way to detect if we succeeded.
        # It is on the other hand unlikely to fail.
class ESPFirmwareImage:
def __init__(self, filename=None):
self.segments = []
self.entrypoint = 0
self.flash_mode = 0
self.flash_size_freq = 0
if filename is not None:
f = file(filename, 'rb')
(magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', f.read(8))
# some sanity check
if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16:
raise FatalError('Invalid firmware image')
for i in xrange(segments):
(offset, size) = struct.unpack('<II', f.read(8))
if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
raise FatalError('Suspicious segment 0x%x, length %d' % (offset, size))
segment_data = f.read(size)
if len(segment_data) < size:
raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
self.segments.append((offset, size, segment_data))
# Skip the padding. The checksum is stored in the last byte so that the
# file is a multiple of 16 bytes.
align = 15 - (f.tell() % 16)
f.seek(align, 1)
self.checksum = ord(f.read(1))
def add_segment(self, addr, data):
# Data should be aligned on word boundary
l = len(data)
if l % 4:
data += b"\x00" * (4 - l % 4)
if l > 0:
self.segments.append((addr, len(data), data))
def save(self, filename):
f = file(filename, 'wb')
f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments),
self.flash_mode, self.flash_size_freq, self.entrypoint))
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (offset, size, data) in self.segments:
f.write(struct.pack('<II', offset, size))
f.write(data)
checksum = ESPROM.checksum(data, checksum)
align = 15 - (f.tell() % 16)
f.seek(align, 1)
f.write(struct.pack('B', checksum))
class ELFFile:
def __init__(self, name):
self.name = name
self.symbols = None
def _fetch_symbols(self):
if self.symbols is not None:
return
self.symbols = {}
try:
tool_nm = "xtensa-lx106-elf-nm"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_nm = "xt-nm"
proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_nm
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
try:
if fields[0] == "U":
print "Warning: ELF binary has undefined symbol %s" % fields[1]
continue
self.symbols[fields[2]] = int(fields[0], 16)
except ValueError:
raise FatalError("Failed to strip symbol output from nm: %s" % fields)
def get_symbol_addr(self, sym):
self._fetch_symbols()
return self.symbols[sym]
def get_entry_point(self):
tool_readelf = "xtensa-lx106-elf-readelf"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_readelf = "xt-readelf"
try:
proc = subprocess.Popen([tool_readelf, "-h", self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_readelf
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
if fields[0] == "Entry":
return int(fields[3], 0)
def load_section(self, section):
tool_objcopy = "xtensa-lx106-elf-objcopy"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_objcopy = "xt-objcopy"
tmpsection = tempfile.mktemp(suffix=".section")
try:
subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection])
with open(tmpsection, "rb") as f:
data = f.read()
finally:
os.remove(tmpsection)
return data
def arg_auto_int(x):
return int(x, 0)
def div_roundup(a, b):
""" Return a/b rounded up to nearest integer,
    equivalent result to int(math.ceil(float(int(a)) / float(int(b)))), only
without possible floating point accuracy errors.
"""
return (int(a) + int(b) - 1) / int(b)
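# Examples (illustrative, added comment): div_roundup(7, 4) == 2,
# div_roundup(8, 4) == 2, div_roundup(9, 4) == 3 -- integer ceil-division
# without going through floats.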
class FatalError(RuntimeError):
"""
Wrapper class for runtime errors that aren't caused by internal bugs, but by
ESP8266 responses or input content.
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
@staticmethod
def WithResult(message, result):
"""
Return a fatal error object that includes the hex values of
'result' as a string formatted argument.
"""
return FatalError(message % ", ".join(hex(ord(x)) for x in result))
def main():
parser = argparse.ArgumentParser(description='ESP8266 ROM Bootloader Utility', prog='esptool')
parser.add_argument(
'--port', '-p',
help='Serial port device',
default='/dev/ttyUSB0')
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=arg_auto_int,
default=ESPROM.ESP_ROM_BAUD)
subparsers = parser.add_subparsers(
dest='operation',
help='Run esptool {command} -h for additional help')
parser_load_ram = subparsers.add_parser(
'load_ram',
help='Download an image to RAM and execute')
parser_load_ram.add_argument('filename', help='Firmware image')
parser_dump_mem = subparsers.add_parser(
'dump_mem',
help='Dump arbitrary memory to disk')
parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_dump_mem.add_argument('filename', help='Name of binary dump')
parser_read_mem = subparsers.add_parser(
'read_mem',
help='Read arbitrary memory location')
parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)
parser_write_mem = subparsers.add_parser(
'write_mem',
help='Read-modify-write to arbitrary memory location')
parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)
parser_write_flash = subparsers.add_parser(
'write_flash',
help='Write a binary blob to flash')
parser_write_flash.add_argument('addr_filename', nargs='+', help='Address and binary file to write there, separated by space')
parser_write_flash.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=['40m', '26m', '20m', '80m'], default='40m')
parser_write_flash.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=['qio', 'qout', 'dio', 'dout'], default='qio')
parser_write_flash.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
subparsers.add_parser(
'run',
help='Run application code in flash')
parser_image_info = subparsers.add_parser(
'image_info',
help='Dump headers from an application image')
parser_image_info.add_argument('filename', help='Image file to parse')
parser_make_image = subparsers.add_parser(
'make_image',
help='Create an application image from binary files')
parser_make_image.add_argument('output', help='Output image file')
parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)
parser_elf2image = subparsers.add_parser(
'elf2image',
help='Create an application image from ELF file')
parser_elf2image.add_argument('input', help='Input ELF file')
parser_elf2image.add_argument('--output', '-o', help='Output filename prefix', type=str)
parser_elf2image.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=['40m', '26m', '20m', '80m'], default='40m')
parser_elf2image.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=['qio', 'qout', 'dio', 'dout'], default='qio')
parser_elf2image.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
subparsers.add_parser(
'read_mac',
help='Read MAC address from OTP ROM')
subparsers.add_parser(
'flash_id',
help='Read SPI flash manufacturer and device ID')
parser_read_flash = subparsers.add_parser(
'read_flash',
help='Read SPI flash content')
parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_read_flash.add_argument('filename', help='Name of binary dump')
subparsers.add_parser(
'erase_flash',
help='Perform Chip Erase on SPI flash')
args = parser.parse_args()
# Create the ESPROM connection object, if needed
esp = None
if args.operation not in ('image_info','make_image','elf2image'):
esp = ESPROM(args.port, args.baud)
esp.connect()
# Do the actual work. Should probably be split into separate functions.
if args.operation == 'load_ram':
image = ESPFirmwareImage(args.filename)
print 'RAM boot...'
for (offset, size, data) in image.segments:
print 'Downloading %d bytes at %08x...' % (size, offset),
sys.stdout.flush()
esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset)
seq = 0
while len(data) > 0:
esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq)
data = data[esp.ESP_RAM_BLOCK:]
seq += 1
print 'done!'
print 'All segments done, executing at %08x' % image.entrypoint
esp.mem_finish(image.entrypoint)
elif args.operation == 'read_mem':
print '0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address))
elif args.operation == 'write_mem':
esp.write_reg(args.address, args.value, args.mask, 0)
print 'Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address)
elif args.operation == 'dump_mem':
f = file(args.filename, 'wb')
for i in xrange(args.size / 4):
d = esp.read_reg(args.address + (i * 4))
f.write(struct.pack('<I', d))
if f.tell() % 1024 == 0:
print '\r%d bytes read... (%d %%)' % (f.tell(),
f.tell() * 100 / args.size),
sys.stdout.flush()
print 'Done!'
elif args.operation == 'write_flash':
assert len(args.addr_filename) % 2 == 0
flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
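        # For example, qio + 4m + 40m packs to bytes 0x00 0x00, while
        # dio + 32m + 80m packs to 0x02 0x4f.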
flash_info = struct.pack('BB', flash_mode, flash_size_freq)
while args.addr_filename:
address = int(args.addr_filename[0], 0)
filename = args.addr_filename[1]
args.addr_filename = args.addr_filename[2:]
image = file(filename, 'rb').read()
print 'Erasing flash...'
blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK)
esp.flash_begin(blocks * esp.ESP_FLASH_BLOCK, address)
seq = 0
written = 0
t = time.time()
while len(image) > 0:
print '\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.ESP_FLASH_BLOCK, 100 * (seq + 1) / blocks),
sys.stdout.flush()
block = image[0:esp.ESP_FLASH_BLOCK]
# Fix sflash config data
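            # (0xe9 is the image magic; header bytes 2-3 hold flash mode and size/freq)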
if address == 0 and seq == 0 and block[0] == '\xe9':
block = block[0:2] + flash_info + block[4:]
# Pad the last block
block = block + '\xff' * (esp.ESP_FLASH_BLOCK - len(block))
esp.flash_block(block, seq)
image = image[esp.ESP_FLASH_BLOCK:]
seq += 1
written += len(block)
t = time.time() - t
print '\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000)
print '\nLeaving...'
if args.flash_mode == 'dio':
esp.flash_unlock_dio()
else:
esp.flash_begin(0, 0)
esp.flash_finish(False)
elif args.operation == 'run':
esp.run()
elif args.operation == 'image_info':
image = ESPFirmwareImage(args.filename)
print ('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set'
print '%d segments' % len(image.segments)
print
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (idx, (offset, size, data)) in enumerate(image.segments):
print 'Segment %d: %5d bytes at %08x' % (idx + 1, size, offset)
checksum = ESPROM.checksum(data, checksum)
print
print 'Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!')
elif args.operation == 'make_image':
image = ESPFirmwareImage()
        if not args.segfile:
            raise FatalError('No segments specified')
        if not args.segaddr or len(args.segfile) != len(args.segaddr):
            raise FatalError('Number of specified files does not match number of specified addresses')
for (seg, addr) in zip(args.segfile, args.segaddr):
data = file(seg, 'rb').read()
image.add_segment(addr, data)
image.entrypoint = args.entrypoint
image.save(args.output)
elif args.operation == 'elf2image':
if args.output is None:
args.output = args.input + '-'
e = ELFFile(args.input)
image = ESPFirmwareImage()
image.entrypoint = e.get_entry_point()
for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")):
data = e.load_section(section)
image.add_segment(e.get_symbol_addr(start), data)
image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
image.save(args.output + "0x00000.bin")
data = e.load_section(".irom0.text")
off = e.get_symbol_addr("_irom0_text_start") - 0x40200000
assert off >= 0
f = open(args.output + "0x%05x.bin" % off, "wb")
f.write(data)
f.close()
elif args.operation == 'read_mac':
mac = esp.read_mac()
print 'MAC: %s' % ':'.join(map(lambda x: '%02x' % x, mac))
elif args.operation == 'flash_id':
flash_id = esp.flash_id()
print 'Manufacturer: %02x' % (flash_id & 0xff)
print 'Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff)
elif args.operation == 'read_flash':
print 'Please wait...'
file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size])
elif args.operation == 'erase_flash':
esp.flash_erase()
if __name__ == '__main__':
try:
main()
except FatalError as e:
print '\nA fatal error occurred: %s' % e
sys.exit(2)
|
gpl-2.0
| -7,000,110,315,298,179,000 | -7,697,851,108,916,912,000 | 38.709497 | 152 | 0.573579 | false |
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/coghq/SellbotMegaFactoryLavaRoomFoyer_Action01.py
|
7
|
8680
|
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_10/models/cashbotHQ/ZONE18a',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
10000: {'type': 'attribModifier',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': ''},
10001: {'type': 'attribModifier',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10004,
'attribName': 'scale',
'recursive': 1,
'typeName': 'model',
'value': 'Vec3(.955,1,1)'},
10019: {'type': 'attribModifier',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10015,
'attribName': 'modelPath',
'recursive': 1,
'typeName': 'model',
'value': ''},
10006: {'type': 'gear',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': -4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0},
10007: {'type': 'gear',
'name': 'copy of <unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 4.28999996185),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': 4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0},
10009: {'type': 'gear',
'name': 'copy of <unnamed> (2)',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 8.57999992371),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': -4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0.055},
10014: {'type': 'gear',
'name': 'copy of <unnamed> (3)',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 12.8699998856),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'degreesPerSec': 4.0,
'gearScale': 14.193780914463838,
'modelType': 'mint',
'orientation': 'horizontal',
'phaseShift': 0.06},
10018: {'type': 'healBarrel',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10017,
'pos': Point3(-2.03643107414, 2.34967470169, 5.46433734894),
'hpr': Vec3(34.1522636414, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'rewardPerGrab': 5,
'rewardPerGrabMax': 0},
10002: {'type': 'model',
'name': '<unnamed>',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(6.5, 6.5, 6.5),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/RoundShadow.bam'},
10005: {'type': 'model',
'name': 'doorwayCrate',
'comment': '',
'parentEntId': 0,
'pos': Point3(27.0090961456, 0.850000023842, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10008: {'type': 'model',
'name': 'shaft',
'comment': '',
'parentEntId': 10003,
'pos': Point3(0.0, 0.0, 7.25891637802),
'hpr': Vec3(0.0, 0.0, 180.0),
'scale': Vec3(5.35842609406, 5.35842609406, 5.35842609406),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModel',
'modelPath': 'phase_10/models/cashbotHQ/MintGearPost.bam'},
10010: {'type': 'model',
'name': 'middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10011: {'type': 'model',
'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(-5.72357320786, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10012: {'type': 'model',
'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(5.71999979019, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10013: {'type': 'model',
'name': 'copy of middle',
'comment': '',
'parentEntId': 10004,
'pos': Point3(11.4399995804, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(0.954999983311, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10015: {'type': 'model',
'name': 'crateStack',
'comment': '',
'parentEntId': 0,
'pos': Point3(-18.0376968384, 20.2023410797, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10016: {'type': 'model',
'name': 'upper',
'comment': '',
'parentEntId': 10015,
'pos': Point3(0.0, 0.0, 5.42841148376),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10017: {'type': 'model',
'name': 'copy of upper',
'comment': '',
'parentEntId': 10016,
'pos': Point3(0.0, 0.0, 5.43412637711),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cogHQ/CBMetalCrate2.bam'},
10021: {'type': 'model',
'name': 'crateStack',
'comment': '',
'parentEntId': 10020,
'pos': Point3(21.064825058, 20.1899757385, 9.87216758728),
'hpr': Vec3(270.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_10/models/cashbotHQ/crates_C1.bam'},
10003: {'type': 'nodepath',
'name': 'gears',
'comment': '',
'parentEntId': 0,
'pos': Point3(-3.18650078773, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': Vec3(1.0, 1.0, 1.0)},
10004: {'type': 'nodepath',
'name': 'wall',
'comment': '',
'parentEntId': 0,
'pos': Point3(19.5468139648, 6.37875938416, 0.0),
'hpr': Point3(270.0, 0.0, 0.0),
'scale': Vec3(1.95812249184, 1.5, 1.79999995232)},
10020: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0.0, 0.0, 0.0),
'hpr': Vec3(0.0, 0.0, 0.0),
'scale': 1}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
|
apache-2.0
| 8,848,032,045,560,647,000 | -3,381,456,991,456,852,000 | 33.859438 | 69 | 0.487327 | false |
phil-lopreiato/the-blue-alliance
|
tests/consts_tests/fcm/test_platform_priority.py
|
3
|
1613
|
import unittest2
from consts.fcm.platform_priority import PlatformPriority
from consts.fcm.platform_type import PlatformType
class TestPlatformPriority(unittest2.TestCase):
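    # The assertions below pin the per-platform encodings: Android and WebPush
    # use the strings 'high'/'normal', while APNs uses the numeric '10'/'5'.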
def test_validate_invalid(self):
with self.assertRaises(ValueError):
PlatformPriority.validate(2)
def test_validate(self):
PlatformPriority.validate(PlatformPriority.NORMAL)
PlatformPriority.validate(PlatformPriority.HIGH)
def test_platform_priority_invalid_platform(self):
with self.assertRaises(ValueError):
PlatformPriority.platform_priority(-1, PlatformPriority.HIGH)
def test_platform_priority_invalid_priority(self):
with self.assertRaises(ValueError):
PlatformPriority.platform_priority(PlatformType.ANDROID, -1)
def test_platform_priority_android(self):
self.assertEqual(PlatformPriority.platform_priority(PlatformType.ANDROID, PlatformPriority.HIGH), 'high')
self.assertEqual(PlatformPriority.platform_priority(PlatformType.ANDROID, PlatformPriority.NORMAL), 'normal')
def test_platform_priority_apns(self):
self.assertEqual(PlatformPriority.platform_priority(PlatformType.APNS, PlatformPriority.HIGH), '10')
self.assertEqual(PlatformPriority.platform_priority(PlatformType.APNS, PlatformPriority.NORMAL), '5')
def test_platform_priority_webpush(self):
self.assertEqual(PlatformPriority.platform_priority(PlatformType.WEBPUSH, PlatformPriority.HIGH), 'high')
self.assertEqual(PlatformPriority.platform_priority(PlatformType.WEBPUSH, PlatformPriority.NORMAL), 'normal')
|
mit
| -4,594,654,286,282,214,000 | -7,413,565,133,172,195,000 | 45.085714 | 117 | 0.761314 | false |
homhei/glance
|
glance/glance_pull/lib/GlanceBattery.py
|
2
|
3375
|
#!/usr/bin/env python
#encode=utf-8
#vim: tabstop=4 shiftwidth=4 softtabstop=4
#Created on 2013-8-17
#Copyright 2013 nuoqingyun xuqifeng
import sys
# Somes libs depends of OS
is_BSD = sys.platform.find('bsd') != -1
is_Linux = sys.platform.startswith('linux')
is_Mac = sys.platform.startswith('darwin')
is_Windows = sys.platform.startswith('win')
try:
# psutil is the main library used to grab stats
import psutil
except ImportError:
print('PsUtil module not found. Glances cannot start.')
sys.exit(1)
psutil_version = tuple([int(num) for num in psutil.__version__.split('.')])
# this is not a mistake: psutil 0.5.1 is detected as 0.5.0
if psutil_version < (0, 5, 0):
print('PsUtil version %s detected.' % psutil.__version__)
print('PsUtil 0.5.1 or higher is needed. Glances cannot start.')
sys.exit(1)
try:
# psutil.virtual_memory() only available from psutil >= 0.6
psutil.virtual_memory()
except Exception:
psutil_mem_vm = False
else:
psutil_mem_vm = True
try:
# psutil.net_io_counters() only available from psutil >= 1.0.0
psutil.net_io_counters()
except Exception:
psutil_net_io_counters = False
else:
psutil_net_io_counters = True
if not is_Mac:
psutil_get_io_counter_tag = True
else:
# get_io_counters() not available on OS X
psutil_get_io_counter_tag = False
# batinfo library (optional; Linux-only)
if is_Linux:
try:
import batinfo
except ImportError:
batinfo_lib_tag = False
else:
batinfo_lib_tag = True
else:
batinfo_lib_tag = False
from oslo.config import cfg
glance_opts = [
cfg.StrOpt('',
default = '',
help = '')
]
CONF = cfg.CONF
CONF.register_opts(glance_opts)
# Default tag
sensors_tag = False
hddtemp_tag = False
network_tag = True
diskio_tag = True
fs_tag = True
process_tag = True
class glanceGrabBat:
"""
    Get battery stats using the batinfo library
"""
def __init__(self):
"""
Init batteries stats
"""
if batinfo_lib_tag:
try:
self.bat = batinfo.batteries()
self.bat.stat[0]["capacity"]
self.initok = True
self.__update__()
except:
self.initok = False
else:
self.initok = False
def __update__(self):
"""
Update the stats
"""
if self.initok:
try:
self.bat.update()
except:
self.bat_list = []
else:
self.bat_list = self.bat.stat
else:
self.bat_list = []
def get(self):
# Update the stats
self.__update__()
return self.bat_list
def getcapacitypercent(self):
if not self.initok or self.bat_list == []:
return []
        # Sum the capacity percentages over every battery (a machine can have
        # more than one), then return the average.
        # For example, two batteries at 80 and 100 percent average to 90.
bsum = 0
for bcpt in range(len(self.get())):
bsum = bsum + int(self.bat_list[bcpt].capacity)
bcpt = bcpt + 1
# Return the global percent
return int(bsum / bcpt)
|
apache-2.0
| -6,785,609,102,218,038,000 | -7,332,623,622,206,076,000 | 23.568182 | 81 | 0.560296 | false |
uclouvain/osis
|
base/migrations/0333_auto_20180820_1343.py
|
2
|
1081
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-08-20 13:43
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0332_auto_20180816_1540'),
]
operations = [
migrations.CreateModel(
name='Prerequisite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prerequisite', models.CharField(blank=True, max_length=240, null=True)),
('education_group_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.EducationGroupYear')),
('learning_unit_year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.LearningUnitYear')),
],
),
migrations.AlterUniqueTogether(
name='prerequisite',
unique_together=set([('learning_unit_year', 'education_group_year')]),
),
]
|
agpl-3.0
| -2,635,404,155,810,416,000 | -4,050,659,326,788,362,000 | 36.275862 | 135 | 0.620722 | false |
AnhellO/DAS_Sistemas
|
Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/core/validators.py
|
7
|
18596
|
import ipaddress
import os
import re
from urllib.parse import urlsplit, urlunsplit
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.ipv6 import is_valid_ipv6_address
from django.utils.translation import gettext_lazy as _, ngettext_lazy
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
def _lazy_re_compile(regex, flags=0):
"""Lazily compile a regex with flags."""
def _compile():
# Compile the regex if it was not passed pre-compiled.
if isinstance(regex, str):
return re.compile(regex, flags)
else:
assert not flags, "flags must be empty if regex is passed pre-compiled"
return regex
return SimpleLazyObject(_compile)
@deconstructible
class RegexValidator:
regex = ''
message = _('Enter a valid value.')
code = 'invalid'
inverse_match = False
flags = 0
def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if inverse_match is not None:
self.inverse_match = inverse_match
if flags is not None:
self.flags = flags
if self.flags and not isinstance(self.regex, str):
raise TypeError("If the flags are set, regex must be a regular expression string.")
self.regex = _lazy_re_compile(self.regex, self.flags)
def __call__(self, value):
"""
Validate that the input contains (or does *not* contain, if
inverse_match is True) a match for the regular expression.
"""
regex_matches = bool(self.regex.search(str(value)))
invalid_input = regex_matches if self.inverse_match else not regex_matches
if invalid_input:
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, RegexValidator) and
self.regex.pattern == other.regex.pattern and
self.regex.flags == other.regex.flags and
(self.message == other.message) and
(self.code == other.code) and
(self.inverse_match == other.inverse_match)
)
@deconstructible
class URLValidator(RegexValidator):
ul = '\u00a1-\uffff' # unicode letters range (must not be a raw string)
# IP patterns
ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
ipv6_re = r'\[[0-9a-f:\.]+\]' # (simple regex, validated later)
# Host patterns
hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
# Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1
domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
tld_re = (
r'\.' # dot
r'(?!-)' # can't start with a dash
r'(?:[a-z' + ul + '-]{2,63}' # domain label
r'|xn--[a-z0-9]{1,59})' # or punycode label
r'(?<!-)' # can't end with a dash
r'\.?' # may have a trailing dot
)
host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
regex = _lazy_re_compile(
r'^(?:[a-z0-9\.\-\+]*)://' # scheme is validated separately
r'(?:\S+(?::\S*)?@)?' # user:pass authentication
r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')'
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # resource path
r'\Z', re.IGNORECASE)
message = _('Enter a valid URL.')
schemes = ['http', 'https', 'ftp', 'ftps']
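    # For example, 'https://example.com:8080/path?q=1#frag' matches the regex;
    # the scheme itself is checked against self.schemes in __call__ below.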
def __init__(self, schemes=None, **kwargs):
super().__init__(**kwargs)
if schemes is not None:
self.schemes = schemes
def __call__(self, value):
# Check first if the scheme is valid
scheme = value.split('://')[0].lower()
if scheme not in self.schemes:
raise ValidationError(self.message, code=self.code)
# Then check full URL
try:
super().__call__(value)
except ValidationError as e:
# Trivial case failed. Try for possible IDN domain
if value:
try:
scheme, netloc, path, query, fragment = urlsplit(value)
except ValueError: # for example, "Invalid IPv6 URL"
raise ValidationError(self.message, code=self.code)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlunsplit((scheme, netloc, path, query, fragment))
super().__call__(url)
else:
raise
else:
# Now verify IPv6 in the netloc part
host_match = re.search(r'^\[(.+)\](?::\d{2,5})?$', urlsplit(value).netloc)
if host_match:
potential_ip = host_match.groups()[0]
try:
validate_ipv6_address(potential_ip)
except ValidationError:
raise ValidationError(self.message, code=self.code)
# The maximum length of a full host name is 253 characters per RFC 1034
# section 3.1. It's defined to be 255 bytes or less, but this includes
# one byte for the length of the name and one byte for the trailing dot
# that's used to indicate absolute names in DNS.
if len(urlsplit(value).netloc) > 253:
raise ValidationError(self.message, code=self.code)
integer_validator = RegexValidator(
_lazy_re_compile(r'^-?\d+\Z'),
message=_('Enter a valid integer.'),
code='invalid',
)
def validate_integer(value):
return integer_validator(value)
@deconstructible
class EmailValidator:
message = _('Enter a valid email address.')
code = 'invalid'
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
domain_regex = _lazy_re_compile(
# max length for domain name labels is 63 characters per RFC 1034
r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
re.IGNORECASE)
literal_regex = _lazy_re_compile(
# literal form, ipv4 or ipv6 address (SMTP 4.1.3)
r'\[([A-f0-9:\.]+)\]\Z',
re.IGNORECASE)
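    # The literal form accepts addresses such as 'user@[127.0.0.1]'.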
domain_whitelist = ['localhost']
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
if whitelist is not None:
self.domain_whitelist = whitelist
def __call__(self, value):
if not value or '@' not in value:
raise ValidationError(self.message, code=self.code)
user_part, domain_part = value.rsplit('@', 1)
if not self.user_regex.match(user_part):
raise ValidationError(self.message, code=self.code)
if (domain_part not in self.domain_whitelist and
not self.validate_domain_part(domain_part)):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
except UnicodeError:
pass
else:
if self.validate_domain_part(domain_part):
return
raise ValidationError(self.message, code=self.code)
def validate_domain_part(self, domain_part):
if self.domain_regex.match(domain_part):
return True
literal_match = self.literal_regex.match(domain_part)
if literal_match:
ip_address = literal_match.group(1)
try:
validate_ipv46_address(ip_address)
return True
except ValidationError:
pass
return False
def __eq__(self, other):
return (
isinstance(other, EmailValidator) and
(self.domain_whitelist == other.domain_whitelist) and
(self.message == other.message) and
(self.code == other.code)
)
validate_email = EmailValidator()
slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z')
validate_slug = RegexValidator(
slug_re,
# Translators: "letters" means latin letters: a-z and A-Z.
_("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."),
'invalid'
)
slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z')
validate_unicode_slug = RegexValidator(
slug_unicode_re,
_("Enter a valid 'slug' consisting of Unicode letters, numbers, underscores, or hyphens."),
'invalid'
)
def validate_ipv4_address(value):
try:
ipaddress.IPv4Address(value)
except ValueError:
raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid')
def validate_ipv6_address(value):
if not is_valid_ipv6_address(value):
raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid')
def validate_ipv46_address(value):
try:
validate_ipv4_address(value)
except ValidationError:
try:
validate_ipv6_address(value)
except ValidationError:
raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid')
ip_address_validator_map = {
'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')),
'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')),
'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')),
}
def ip_address_validators(protocol, unpack_ipv4):
"""
Depending on the given parameters, return the appropriate validators for
the GenericIPAddressField.
"""
if protocol != 'both' and unpack_ipv4:
raise ValueError(
"You can only use `unpack_ipv4` if `protocol` is set to 'both'")
try:
return ip_address_validator_map[protocol.lower()]
except KeyError:
raise ValueError("The protocol '%s' is unknown. Supported: %s"
% (protocol, list(ip_address_validator_map)))
def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False):
regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % {
'neg': '(-)?' if allow_negative else '',
'sep': re.escape(sep),
})
return RegexValidator(regexp, message=message, code=code)
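# For example (a sketch): int_list_validator(sep=':')('1:2:3') passes silently,
# while int_list_validator(sep=':')('a:b') raises ValidationError.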
validate_comma_separated_integer_list = int_list_validator(
message=_('Enter only digits separated by commas.'),
)
@deconstructible
class BaseValidator:
message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value, message=None):
self.limit_value = limit_value
if message:
self.message = message
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, self.limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.limit_value == other.limit_value and
self.message == other.message and
self.code == other.code
)
def compare(self, a, b):
return a is not b
def clean(self, x):
return x
@deconstructible
class MaxValueValidator(BaseValidator):
message = _('Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
def compare(self, a, b):
return a > b
@deconstructible
class MinValueValidator(BaseValidator):
message = _('Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
def compare(self, a, b):
return a < b
@deconstructible
class MinLengthValidator(BaseValidator):
message = ngettext_lazy(
'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'min_length'
def compare(self, a, b):
return a < b
def clean(self, x):
return len(x)
@deconstructible
class MaxLengthValidator(BaseValidator):
message = ngettext_lazy(
'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).',
'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).',
'limit_value')
code = 'max_length'
def compare(self, a, b):
return a > b
def clean(self, x):
return len(x)
@deconstructible
class DecimalValidator:
"""
Validate that the input does not exceed the maximum number of digits
expected, otherwise raise ValidationError.
"""
messages = {
'max_digits': ngettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'
),
'max_decimal_places': ngettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'
),
'max_whole_digits': ngettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'
),
}
def __init__(self, max_digits, decimal_places):
self.max_digits = max_digits
self.decimal_places = decimal_places
def __call__(self, value):
digit_tuple, exponent = value.as_tuple()[1:]
if exponent >= 0:
# A positive exponent adds that many trailing zeros.
digits = len(digit_tuple) + exponent
decimals = 0
else:
# If the absolute value of the negative exponent is larger than the
# number of digits, then it's the same as the number of digits,
# because it'll consume all of the digits in digit_tuple and then
# add abs(exponent) - len(digit_tuple) leading zeros after the
# decimal point.
if abs(exponent) > len(digit_tuple):
digits = decimals = abs(exponent)
else:
digits = len(digit_tuple)
decimals = abs(exponent)
whole_digits = digits - decimals
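        # For example, Decimal('123.45').as_tuple() has digits (1, 2, 3, 4, 5)
        # and exponent -2, giving digits=5, decimals=2 and whole_digits=3.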
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None and
whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.max_digits == other.max_digits and
self.decimal_places == other.decimal_places
)
@deconstructible
class FileExtensionValidator:
message = _(
"File extension '%(extension)s' is not allowed. "
"Allowed extensions are: '%(allowed_extensions)s'."
)
code = 'invalid_extension'
def __init__(self, allowed_extensions=None, message=None, code=None):
if allowed_extensions is not None:
allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions]
self.allowed_extensions = allowed_extensions
if message is not None:
self.message = message
if code is not None:
self.code = code
def __call__(self, value):
extension = os.path.splitext(value.name)[1][1:].lower()
if self.allowed_extensions is not None and extension not in self.allowed_extensions:
raise ValidationError(
self.message,
code=self.code,
params={
'extension': extension,
'allowed_extensions': ', '.join(self.allowed_extensions)
}
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.allowed_extensions == other.allowed_extensions and
self.message == other.message and
self.code == other.code
)
def get_available_image_extensions():
try:
from PIL import Image
except ImportError:
return []
else:
Image.init()
return [ext.lower()[1:] for ext in Image.EXTENSION]
validate_image_file_extension = FileExtensionValidator(
allowed_extensions=get_available_image_extensions(),
)
@deconstructible
class ProhibitNullCharactersValidator:
"""Validate that the string doesn't contain the null character."""
message = _('Null characters are not allowed.')
code = 'null_characters_not_allowed'
def __init__(self, message=None, code=None):
if message is not None:
self.message = message
if code is not None:
self.code = code
def __call__(self, value):
if '\x00' in str(value):
raise ValidationError(self.message, code=self.code)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.message == other.message and
self.code == other.code
)
|
mit
| 4,037,022,720,249,074,000 | 1,230,467,153,281,725,700 | 33.69403 | 110 | 0.574747 | false |
eharney/cinder
|
cinder/api/v3/attachments.py
|
1
|
11362
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import attachments as attachment_views
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(202)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (ie Nova BootFromVolume) the
create can be called with just the volume-uuid and the server
identifier. This will reserve an attachment, mark the volume as
reserved and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms server == instance, the server_id parameter
referenced below is the UUID of the Instance, for non-nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE all that's required for a reserve is volume_uuid
and an instance_uuid.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment'].get('instance_uuid', None)
if not instance_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'instance_uuid' "
"to create attachment."))
volume_uuid = body['attachment'].get('volume_uuid', None)
if not volume_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'volume_uuid' "
"to create attachment."))
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
connector = body['attachment'].get('connector', None)
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
connector=connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception as ex:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": None|"rw"|"ro",
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment'].get('connector', None)
if not connector:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector' "
"to update attachment."))
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment.(%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
# TODO(jdg): Test this out some more, do we want to return and object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
        Disconnects/deletes the specified attachment and returns a list of any
        known shared attachment IDs for the affected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(202)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
attachment_ref.update({'attach_status': 'attached'})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
|
apache-2.0
| -2,170,219,958,094,626,600 | 3,917,587,111,729,390,000 | 38.451389 | 79 | 0.58106 | false |
travellhyne/f2py
|
fparser/parsefortran.py
|
3
|
5789
|
#!/usr/bin/env python
"""Provides FortranParser.
"""
#Author: Pearu Peterson <[email protected]>
#Created: May 2006
__autodoc__ = ['FortranParser']
__all__ = ['FortranParser']
import re
import sys
import traceback
import logging
from numpy.distutils.misc_util import yellow_text, red_text
from readfortran import FortranFileReader, FortranStringReader
from block_statements import BeginSource
from utils import AnalyzeError
logger = logging.getLogger('fparser')
class FortranParser(object):
"""Parser of FortranReader structure.
    Use the .parse() method for parsing; the result is saved in the .block attribute.
"""
cache = {}
def __init__(self, reader, ignore_comments=True):
self.reader = reader
if reader.id in self.cache:
parser = self.cache[reader.id]
self.block = parser.block
self.is_analyzed = parser.is_analyzed
logger.info('using cached %s' % (reader.id))
# self.block.show_message('using cached %s' % (reader.id))
else:
self.cache[reader.id] = self
self.block = None
self.is_analyzed = False
self.ignore_comments = ignore_comments
return
def get_item(self):
try:
item = self.reader.next(ignore_comments = self.ignore_comments)
return item
except StopIteration:
pass
return
def put_item(self, item):
self.reader.fifo_item.insert(0, item)
return
def parse(self):
if self.block is not None:
return
try:
block = self.block = BeginSource(self)
except KeyboardInterrupt:
raise
except:
reader = self.reader
while reader is not None:
message = reader.format_message('FATAL ERROR',
'while processing line',
reader.linecount, reader.linecount)
logger.critical(message)
# reader.show_message(message, sys.stderr)
reader = reader.reader
logger.debug(''.join(('Traceback\n',''.join( traceback.format_stack() ))))
# traceback.print_exc(file=sys.stderr)
logger.critical(red_text('STOPPED PARSING'))
# self.reader.show_message(red_text('STOPPED PARSING'), sys.stderr)
return
return
def analyze(self):
if self.is_analyzed:
return
if self.block is None:
logger.info('Nothing to analyze.')
# self.reader.show_message('Nothing to analyze.')
return
try:
self.block.analyze()
except AnalyzeError:
pass
# except Exception, msg:
# import pdb; pdb.set_trace()
# if str(msg) != '123454321':
# #print self.block
# logger.debug(''.join(('Traceback\n',''.join( traceback.format_stack() ))))
# logger.critical(red_text('FATAL ERROR: STOPPED ANALYSING %r CONTENT' % (self.reader.source) ))
# # self.reader.show_message(red_text('FATAL ERROR: STOPPED ANALYSING %r CONTENT' % (self.reader.source) ), sys.stderr)
# sys.exit(123454321)
# return
self.is_analyzed = True
return
def test_pyf():
string = """
python module foo
interface tere
subroutine bar
real r
end subroutine bar
end interface tere
end python module foo
"""
reader = FortranStringReader(string, True, True)
parser = FortranParser(reader)
block = parser.parse()
print block
def test_free90():
string = """
module foo
subroutine bar
real r
if ( pc_get_lun() .ne. 6) &
write ( pc_get_lun(), '( &
& /, a, /, " p=", i4, " stopping c_flag=", a, &
& /, " print unit=", i8)') &
trim(title), pcpsx_i_pel(), trim(c_flag), pc_get_lun()
if (.true.) then
call smth
end if
aaa : if (.false.) then
else if (a) then aaa
else aaa
end if aaa
hey = 1
end subroutine bar
abstract interface
end interface
end module foo
"""
reader = FortranStringReader(string, True, False)
parser = FortranParser(reader)
block = parser.parse()
print block
def test_f77():
string = """\
program foo
a = 3
end
subroutine bar
end
pure function foo(a)
end
pure real*4 recursive function bar()
end
"""
reader = FortranStringReader(string, False, True)
parser = FortranParser(reader)
block = parser.parse()
print block
def simple_main():
import sys
if not sys.argv[1:]:
return parse_all_f()
for filename in sys.argv[1:]:
reader = FortranFileReader(filename)
print yellow_text('Processing '+filename+' (mode=%r)' % (reader.mode))
parser = FortranParser(reader)
parser.parse()
parser.analyze()
print parser.block.torepr(4)
#print parser.block
def profile_main():
import hotshot, hotshot.stats
prof = hotshot.Profile("_parsefortran.prof")
prof.runcall(simple_main)
prof.close()
stats = hotshot.stats.load("_parsefortran.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(30)
def parse_all_f():
for filename in open('opt_all_f.txt'):
filename = filename.strip()
reader = FortranFileReader(filename)
print yellow_text('Processing '+filename+' (mode=%r)' % (reader.mode))
parser = FortranParser(reader)
block = parser.parse()
print block
if __name__ == "__main__":
#test_f77()
#test_free90()
#test_pyf()
simple_main()
#profile_main()
#parse_all_f()
|
bsd-3-clause
| -9,213,836,211,900,005,000 | -4,352,219,095,284,717,000 | 27.377451 | 135 | 0.576956 | false |
laszlocsomor/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/permute_test.py
|
26
|
3206
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Permute bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class PermuteBijectorTest(test.TestCase):
"""Tests correctness of the Permute bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
expected_permutation = np.int32([2, 0, 1])
expected_x = np.random.randn(4, 2, 3)
expected_y = expected_x[..., expected_permutation]
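    # With permutation [2, 0, 1] the last axis [a, b, c] maps to [c, a, b];
    # the fancy indexing above mirrors what the bijector's forward() computes.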
with self.test_session() as sess:
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
[
permutation_,
x_,
y_,
fldj,
ildj,
] = sess.run([
bijector.permutation,
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x),
bijector.inverse_log_det_jacobian(expected_y),
], feed_dict={permutation_ph: expected_permutation})
self.assertEqual("permute", bijector.name)
self.assertAllEqual(expected_permutation, permutation_)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj, rtol=1e-6, atol=0)
def testRaisesOpError(self):
with self.test_session() as sess:
with self.assertRaisesOpError("Permutation over `d` must contain"):
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
sess.run(bijector.inverse([1.]),
feed_dict={permutation_ph: [1, 2]})
def testBijectiveAndFinite(self):
permutation = np.int32([2, 0, 1])
x = np.random.randn(4, 2, 3)
y = x[..., permutation]
with self.test_session():
bijector = Permute(
permutation=permutation,
validate_args=True)
assert_bijective_and_finite(bijector, x, y, rtol=1e-6, atol=0)
if __name__ == "__main__":
test.main()
|
apache-2.0
| -8,386,326,508,322,609,000 | -6,501,293,163,244,436,000 | 35.850575 | 94 | 0.660636 | false |
timm/timmnix
|
pypy3-v5.5.0-linux64/lib-python/3/test/test_strtod.py
|
4
|
20594
|
# Tests for the correctly-rounded string -> float conversions
# introduced in Python 2.7 and 3.1.
import random
import unittest
import re
import sys
import test.support
if getattr(sys, 'float_repr_style', '') != 'short':
raise unittest.SkipTest('correctly-rounded string->float conversions '
'not available on this system')
# Correctly rounded str -> float in pure Python, for comparison.
strtod_parser = re.compile(r""" # A numeric string consists of:
(?P<sign>[-+])? # an optional sign, followed by
(?=\d|\.\d) # a number with at least one digit
(?P<int>\d*) # having a (possibly empty) integer part
(?:\.(?P<frac>\d*))? # followed by an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and an optional exponent
\Z
""", re.VERBOSE | re.IGNORECASE).match
# Pure Python version of correctly rounded string->float conversion.
# Avoids any use of floating-point by returning the result as a hex string.
def strtod(s, mant_dig=53, min_exp = -1021, max_exp = 1024):
"""Convert a finite decimal string to a hex string representing an
IEEE 754 binary64 float. Return 'inf' or '-inf' on overflow.
This function makes no use of floating-point arithmetic at any
stage."""
# parse string into a pair of integers 'a' and 'b' such that
# abs(decimal value) = a/b, along with a boolean 'negative'.
m = strtod_parser(s)
if m is None:
raise ValueError('invalid numeric string')
fraction = m.group('frac') or ''
intpart = int(m.group('int') + fraction)
exp = int(m.group('exp') or '0') - len(fraction)
negative = m.group('sign') == '-'
a, b = intpart*10**max(exp, 0), 10**max(0, -exp)
# quick return for zeros
if not a:
return '-0x0.0p+0' if negative else '0x0.0p+0'
# compute exponent e for result; may be one too small in the case
# that the rounded value of a/b lies in a different binade from a/b
d = a.bit_length() - b.bit_length()
d += (a >> d if d >= 0 else a << -d) >= b
e = max(d, min_exp) - mant_dig
# approximate a/b by number of the form q * 2**e; adjust e if necessary
a, b = a << max(-e, 0), b << max(e, 0)
q, r = divmod(a, b)
if 2*r > b or 2*r == b and q & 1:
q += 1
if q.bit_length() == mant_dig+1:
q //= 2
e += 1
# double check that (q, e) has the right form
assert q.bit_length() <= mant_dig and e >= min_exp - mant_dig
assert q.bit_length() == mant_dig or e == min_exp - mant_dig
# check for overflow and underflow
if e + q.bit_length() > max_exp:
return '-inf' if negative else 'inf'
if not q:
return '-0x0.0p+0' if negative else '0x0.0p+0'
# for hex representation, shift so # bits after point is a multiple of 4
hexdigs = 1 + (mant_dig-2)//4
shift = 3 - (mant_dig-2)%4
q, e = q << shift, e - shift
return '{}0x{:x}.{:0{}x}p{:+d}'.format(
'-' if negative else '',
q // 16**hexdigs,
q % 16**hexdigs,
hexdigs,
e + 4*hexdigs)
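# Spot check (not part of the original tests): strtod('0.1') returns
# '0x1.999999999999ap-4', which equals float('0.1').hex().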
TEST_SIZE = 10
class StrtodTests(unittest.TestCase):
def check_strtod(self, s):
"""Compare the result of Python's builtin correctly rounded
string->float conversion (using float) to a pure Python
correctly rounded string->float implementation. Fail if the
two methods give different results."""
try:
fs = float(s)
except OverflowError:
got = '-inf' if s[0] == '-' else 'inf'
except MemoryError:
got = 'memory error'
else:
got = fs.hex()
expected = strtod(s)
self.assertEqual(expected, got,
"Incorrectly rounded str->float conversion for {}: "
"expected {}, got {}".format(s, expected, got))
def test_short_halfway_cases(self):
# exact halfway cases with a small number of significant digits
for k in 0, 5, 10, 15, 20:
# upper = smallest integer >= 2**54/5**k
upper = -(-2**54//5**k)
# lower = smallest odd number >= 2**53/5**k
lower = -(-2**53//5**k)
if lower % 2 == 0:
lower += 1
for i in range(TEST_SIZE):
# Select a random odd n in [2**53/5**k,
# 2**54/5**k). Then n * 10**k gives a halfway case
# with small number of significant digits.
n, e = random.randrange(lower, upper, 2), k
# Remove any additional powers of 5.
while n % 5 == 0:
n, e = n // 5, e + 1
assert n % 10 in (1, 3, 7, 9)
# Try numbers of the form n * 2**p2 * 10**e, p2 >= 0,
# until n * 2**p2 has more than 20 significant digits.
digits, exponent = n, e
while digits < 10**20:
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
# Same again, but with extra trailing zeros.
s = '{}e{}'.format(digits * 10**40, exponent - 40)
self.check_strtod(s)
digits *= 2
                # Try numbers of the form n * 5**p5 * 10**(e - p5), p5
                # >= 0, with n * 5**p5 < 10**20.
digits, exponent = n, e
while digits < 10**20:
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
# Same again, but with extra trailing zeros.
s = '{}e{}'.format(digits * 10**40, exponent - 40)
self.check_strtod(s)
digits *= 5
exponent -= 1
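    # A classic instance of such a halfway case (illustrative): the integer
    # 2**53 + 1 = 9007199254740993 lies exactly between the representable
    # doubles 2**53 and 2**53 + 2, so float('9007199254740993') rounds down
    # to 9007199254740992.0 under round-half-to-even.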
def test_halfway_cases(self):
# test halfway cases for the round-half-to-even rule
for i in range(100 * TEST_SIZE):
# bit pattern for a random finite positive (or +0.0) float
bits = random.randrange(2047*2**52)
# convert bit pattern to a number of the form m * 2**e
e, m = divmod(bits, 2**52)
if e:
m, e = m + 2**52, e - 1
e -= 1074
# add 0.5 ulps
m, e = 2*m + 1, e - 1
# convert to a decimal string
if e >= 0:
digits = m << e
exponent = 0
else:
# m * 2**e = (m * 5**-e) * 10**e
digits = m * 5**-e
exponent = e
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
def test_boundaries(self):
# boundaries expressed as triples (n, e, u), where
# n*10**e is an approximation to the boundary value and
# u*10**e is 1ulp
boundaries = [
(10000000000000000000, -19, 1110), # a power of 2 boundary (1.0)
(17976931348623159077, 289, 1995), # overflow boundary (2.**1024)
(22250738585072013831, -327, 4941), # normal/subnormal (2.**-1022)
(0, -327, 4941), # zero
]
for n, e, u in boundaries:
for j in range(1000):
digits = n + random.randrange(-3*u, 3*u)
exponent = e
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
n *= 10
u *= 10
e -= 1
def test_underflow_boundary(self):
# test values close to 2**-1075, the underflow boundary; similar
# to boundary_tests, except that the random error doesn't scale
# with n
for exponent in range(-400, -320):
base = 10**-exponent // 2**1075
for j in range(TEST_SIZE):
digits = base + random.randrange(-1000, 1000)
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
def test_bigcomp(self):
for ndigs in 5, 10, 14, 15, 16, 17, 18, 19, 20, 40, 41, 50:
dig10 = 10**ndigs
for i in range(10 * TEST_SIZE):
digits = random.randrange(dig10)
exponent = random.randrange(-400, 400)
s = '{}e{}'.format(digits, exponent)
self.check_strtod(s)
def test_parsing(self):
# make '0' more likely to be chosen than other digits
digits = '000000123456789'
signs = ('+', '-', '')
# put together random short valid strings
# \d*[.\d*]?e
for i in range(1000):
for j in range(TEST_SIZE):
s = random.choice(signs)
intpart_len = random.randrange(5)
s += ''.join(random.choice(digits) for _ in range(intpart_len))
if random.choice([True, False]):
s += '.'
fracpart_len = random.randrange(5)
s += ''.join(random.choice(digits)
for _ in range(fracpart_len))
else:
fracpart_len = 0
if random.choice([True, False]):
s += random.choice(['e', 'E'])
s += random.choice(signs)
exponent_len = random.randrange(1, 4)
s += ''.join(random.choice(digits)
for _ in range(exponent_len))
if intpart_len + fracpart_len:
self.check_strtod(s)
else:
try:
float(s)
except ValueError:
pass
else:
assert False, "expected ValueError"
@test.support.bigmemtest(size=test.support._2G+10, memuse=4, dry_run=False)
def test_oversized_digit_strings(self, maxsize):
# Input string whose length doesn't fit in an INT.
s = "1." + "1" * maxsize
with self.assertRaises(ValueError):
float(s)
del s
s = "0." + "0" * maxsize + "1"
with self.assertRaises(ValueError):
float(s)
del s
def test_large_exponents(self):
# Verify that the clipping of the exponent in strtod doesn't affect the
# output values.
def positive_exp(n):
""" Long string with value 1.0 and exponent n"""
return '0.{}1e+{}'.format('0'*(n-1), n)
def negative_exp(n):
""" Long string with value 1.0 and exponent -n"""
return '1{}e-{}'.format('0'*n, n)
self.assertEqual(float(positive_exp(10000)), 1.0)
self.assertEqual(float(positive_exp(20000)), 1.0)
self.assertEqual(float(positive_exp(30000)), 1.0)
self.assertEqual(float(negative_exp(10000)), 1.0)
self.assertEqual(float(negative_exp(20000)), 1.0)
self.assertEqual(float(negative_exp(30000)), 1.0)
def test_particular(self):
# inputs that produced crashes or incorrectly rounded results with
# previous versions of dtoa.c, for various reasons
test_strings = [
# issue 7632 bug 1, originally reported failing case
'2183167012312112312312.23538020374420446192e-370',
# 5 instances of issue 7632 bug 2
'12579816049008305546974391768996369464963024663104e-357',
'17489628565202117263145367596028389348922981857013e-357',
'18487398785991994634182916638542680759613590482273e-357',
'32002864200581033134358724675198044527469366773928e-358',
'94393431193180696942841837085033647913224148539854e-358',
'73608278998966969345824653500136787876436005957953e-358',
'64774478836417299491718435234611299336288082136054e-358',
'13704940134126574534878641876947980878824688451169e-357',
'46697445774047060960624497964425416610480524760471e-358',
# failing case for bug introduced by METD in r77451 (attempted
# fix for issue 7632, bug 2), and fixed in r77482.
'28639097178261763178489759107321392745108491825303e-311',
# two numbers demonstrating a flaw in the bigcomp 'dig == 0'
# correction block (issue 7632, bug 3)
'1.00000000000000001e44',
'1.0000000000000000100000000000000000000001e44',
# dtoa.c bug for numbers just smaller than a power of 2 (issue
# 7632, bug 4)
'99999999999999994487665465554760717039532578546e-47',
# failing case for off-by-one error introduced by METD in
# r77483 (dtoa.c cleanup), fixed in r77490
'965437176333654931799035513671997118345570045914469' #...
'6213413350821416312194420007991306908470147322020121018368e0',
# incorrect lsb detection for round-half-to-even when
# bc->scale != 0 (issue 7632, bug 6).
'104308485241983990666713401708072175773165034278685' #...
'682646111762292409330928739751702404658197872319129' #...
'036519947435319418387839758990478549477777586673075' #...
'945844895981012024387992135617064532141489278815239' #...
'849108105951619997829153633535314849999674266169258' #...
'928940692239684771590065027025835804863585454872499' #...
'320500023126142553932654370362024104462255244034053' #...
'203998964360882487378334860197725139151265590832887' #...
'433736189468858614521708567646743455601905935595381' #...
'852723723645799866672558576993978025033590728687206' #...
'296379801363024094048327273913079612469982585674824' #...
'156000783167963081616214710691759864332339239688734' #...
'656548790656486646106983450809073750535624894296242' #...
'072010195710276073042036425579852459556183541199012' #...
'652571123898996574563824424330960027873516082763671875e-1075',
# demonstration that original fix for issue 7632 bug 1 was
# buggy; the exit condition was too strong
'247032822920623295e-341',
# demonstrate similar problem to issue 7632 bug1: crash
# with 'oversized quotient in quorem' message.
'99037485700245683102805043437346965248029601286431e-373',
'99617639833743863161109961162881027406769510558457e-373',
'98852915025769345295749278351563179840130565591462e-372',
'99059944827693569659153042769690930905148015876788e-373',
'98914979205069368270421829889078356254059760327101e-372',
# issue 7632 bug 5: the following 2 strings convert differently
'1000000000000000000000000000000000000000e-16',
'10000000000000000000000000000000000000000e-17',
# issue 7632 bug 7
'991633793189150720000000000000000000000000000000000000000e-33',
# And another, similar, failing halfway case
'4106250198039490000000000000000000000000000000000000000e-38',
# issue 7632 bug 8: the following produced 10.0
'10.900000000000000012345678912345678912345',
# two humongous values from issue 7743
'116512874940594195638617907092569881519034793229385' #...
'228569165191541890846564669771714896916084883987920' #...
'473321268100296857636200926065340769682863349205363' #...
'349247637660671783209907949273683040397979984107806' #...
'461822693332712828397617946036239581632976585100633' #...
'520260770761060725403904123144384571612073732754774' #...
'588211944406465572591022081973828448927338602556287' #...
'851831745419397433012491884869454462440536895047499' #...
'436551974649731917170099387762871020403582994193439' #...
'761933412166821484015883631622539314203799034497982' #...
'130038741741727907429575673302461380386596501187482' #...
'006257527709842179336488381672818798450229339123527' #...
'858844448336815912020452294624916993546388956561522' #...
'161875352572590420823607478788399460162228308693742' #...
'05287663441403533948204085390898399055004119873046875e-1075',
'525440653352955266109661060358202819561258984964913' #...
'892256527849758956045218257059713765874251436193619' #...
'443248205998870001633865657517447355992225852945912' #...
'016668660000210283807209850662224417504752264995360' #...
'631512007753855801075373057632157738752800840302596' #...
'237050247910530538250008682272783660778181628040733' #...
'653121492436408812668023478001208529190359254322340' #...
'397575185248844788515410722958784640926528544043090' #...
'115352513640884988017342469275006999104519620946430' #...
'818767147966495485406577703972687838176778993472989' #...
'561959000047036638938396333146685137903018376496408' #...
'319705333868476925297317136513970189073693314710318' #...
'991252811050501448326875232850600451776091303043715' #...
'157191292827614046876950225714743118291034780466325' #...
'085141343734564915193426994587206432697337118211527' #...
'278968731294639353354774788602467795167875117481660' #...
'4738791256853675690543663283782215866825e-1180',
# exercise exit conditions in bigcomp comparison loop
'2602129298404963083833853479113577253105939995688e2',
'260212929840496308383385347911357725310593999568896e0',
'26021292984049630838338534791135772531059399956889601e-2',
'260212929840496308383385347911357725310593999568895e0',
'260212929840496308383385347911357725310593999568897e0',
'260212929840496308383385347911357725310593999568996e0',
'260212929840496308383385347911357725310593999568866e0',
# 2**53
'9007199254740992.00',
# 2**1024 - 2**970: exact overflow boundary. All values
# smaller than this should round to something finite; any value
# greater than or equal to this one overflows.
'179769313486231580793728971405303415079934132710037' #...
'826936173778980444968292764750946649017977587207096' #...
'330286416692887910946555547851940402630657488671505' #...
'820681908902000708383676273854845817711531764475730' #...
'270069855571366959622842914819860834936475292719074' #...
'168444365510704342711559699508093042880177904174497792',
# 2**1024 - 2**970 - tiny
'179769313486231580793728971405303415079934132710037' #...
'826936173778980444968292764750946649017977587207096' #...
'330286416692887910946555547851940402630657488671505' #...
'820681908902000708383676273854845817711531764475730' #...
'270069855571366959622842914819860834936475292719074' #...
'168444365510704342711559699508093042880177904174497791.999',
# 2**1024 - 2**970 + tiny
'179769313486231580793728971405303415079934132710037' #...
'826936173778980444968292764750946649017977587207096' #...
'330286416692887910946555547851940402630657488671505' #...
'820681908902000708383676273854845817711531764475730' #...
'270069855571366959622842914819860834936475292719074' #...
'168444365510704342711559699508093042880177904174497792.001',
# 1 - 2**-54, +-tiny
'999999999999999944488848768742172978818416595458984375e-54',
'9999999999999999444888487687421729788184165954589843749999999e-54',
'9999999999999999444888487687421729788184165954589843750000001e-54',
# Value found by Rick Regan that gives a result of 2**-968
# under Gay's dtoa.c (as of Nov 04, 2010); since fixed.
# (Fixed some time ago in Python's dtoa.c.)
'0.0000000000000000000000000000000000000000100000000' #...
'000000000576129113423785429971690421191214034235435' #...
'087147763178149762956868991692289869941246658073194' #...
'51982237978882039897143840789794921875',
]
for s in test_strings:
self.check_strtod(s)
def test_main():
test.support.run_unittest(StrtodTests)
if __name__ == "__main__":
test_main()
|
mit
| -8,974,190,803,205,918,000 | -9,220,821,061,881,992,000 | 46.233945 | 80 | 0.594882 | false |
arbrandes/edx-platform
|
openedx/core/djangoapps/content/course_overviews/tests/test_tasks.py
|
4
|
1982
|
# lint-amnesty, pylint: disable=missing-module-docstring
from unittest import mock
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..tasks import enqueue_async_course_overview_update_tasks
class BatchedAsyncCourseOverviewUpdateTests(ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring
def setUp(self):
super().setUp()
self.course_1 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
self.course_2 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
self.course_3 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo)
@mock.patch('openedx.core.djangoapps.content.course_overviews.models.CourseOverview.update_select_courses')
def test_enqueue_all_courses_in_single_batch(self, mock_update_courses):
enqueue_async_course_overview_update_tasks(
course_ids=[],
force_update=True,
all_courses=True
)
called_args, called_kwargs = mock_update_courses.call_args_list[0]
assert sorted([self.course_1.id, self.course_2.id, self.course_3.id]) == sorted(called_args[0])
assert {'force_update': True} == called_kwargs
assert 1 == mock_update_courses.call_count
@mock.patch('openedx.core.djangoapps.content.course_overviews.models.CourseOverview.update_select_courses')
def test_enqueue_specific_courses_in_two_batches(self, mock_update_courses):
enqueue_async_course_overview_update_tasks(
course_ids=[str(self.course_1.id), str(self.course_2.id)],
force_update=True,
chunk_size=1,
all_courses=False
)
mock_update_courses.assert_has_calls([
mock.call([self.course_1.id], force_update=True),
mock.call([self.course_2.id], force_update=True)
])
|
agpl-3.0
| 1,448,993,373,424,995,800 | 5,745,548,923,515,000,000 | 44.045455 | 122 | 0.701816 | false |
turbokongen/home-assistant
|
homeassistant/components/plex/config_flow.py
|
1
|
15991
|
"""Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from .const import ( # pylint: disable=unused-import
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
"""Return a set of the configured Plex servers."""
return {
entry.data[CONF_SERVER_IDENTIFIER]
for entry in hass.config_entries.async_entries(DOMAIN)
}
async def async_discover(hass):
"""Scan for available Plex servers."""
gdm = GDM()
await hass.async_add_executor_job(gdm.scan)
for server_data in gdm.entries:
await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY},
data=server_data,
)
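# Illustrative GDM discovery payload as consumed by
# async_step_integration_discovery below (field names taken from its lookups;
# the concrete values here are made up):
#   {"data": {"Resource-Identifier": "abc123", "Name": "Office Plex",
#             "Port": "32400"},
#    "from": ("192.168.1.10", 32414)}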
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Plex config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlexOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Plex flow."""
self.current_login = {}
self.available_servers = None
self.plexauth = None
self.token = None
self.client_id = None
self._manual = False
async def async_step_user(
self, user_input=None, errors=None
): # pylint: disable=arguments-differ
"""Handle a flow initialized by the user."""
if user_input is not None:
return await self.async_step_plex_website_auth()
if self.show_advanced_options:
return await self.async_step_user_advanced(errors=errors)
return self.async_show_form(step_id="user", errors=errors)
async def async_step_user_advanced(self, user_input=None, errors=None):
"""Handle an advanced mode flow initialized by the user."""
if user_input is not None:
if user_input.get("setup_method") == MANUAL_SETUP_STRING:
self._manual = True
return await self.async_step_manual_setup()
return await self.async_step_plex_website_auth()
data_schema = vol.Schema(
{
vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
[AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
)
}
)
return self.async_show_form(
step_id="user_advanced", data_schema=data_schema, errors=errors
)
async def async_step_manual_setup(self, user_input=None, errors=None):
"""Begin manual configuration."""
if user_input is not None and errors is None:
user_input.pop(CONF_URL, None)
host = user_input.get(CONF_HOST)
if host:
port = user_input[CONF_PORT]
prefix = "https" if user_input.get(CONF_SSL) else "http"
user_input[CONF_URL] = f"{prefix}://{host}:{port}"
elif CONF_TOKEN not in user_input:
return await self.async_step_manual_setup(
user_input=user_input, errors={"base": "host_or_token"}
)
return await self.async_step_server_validate(user_input)
previous_input = user_input or {}
data_schema = vol.Schema(
{
vol.Optional(
CONF_HOST,
description={"suggested_value": previous_input.get(CONF_HOST)},
): str,
vol.Required(
CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
): int,
vol.Required(
CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
): bool,
vol.Required(
CONF_VERIFY_SSL,
default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
vol.Optional(
CONF_TOKEN,
description={"suggested_value": previous_input.get(CONF_TOKEN)},
): str,
}
)
return self.async_show_form(
step_id="manual_setup", data_schema=data_schema, errors=errors
)
async def async_step_server_validate(self, server_config):
"""Validate a provided configuration."""
errors = {}
self.current_login = server_config
plex_server = PlexServer(self.hass, server_config)
try:
await self.hass.async_add_executor_job(plex_server.connect)
except NoServersFound:
_LOGGER.error("No servers linked to Plex account")
errors["base"] = "no_servers"
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
_LOGGER.error("Invalid credentials provided, config not created")
errors[CONF_TOKEN] = "faulty_credentials"
except requests.exceptions.SSLError as error:
_LOGGER.error("SSL certificate error: [%s]", error)
errors["base"] = "ssl_error"
except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
server_identifier = (
server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
)
_LOGGER.error("Plex server could not be reached: %s", server_identifier)
errors[CONF_HOST] = "not_found"
except ServerNotSpecified as available_servers:
self.available_servers = available_servers.args[0]
return await self.async_step_select_server()
except Exception as error: # pylint: disable=broad-except
_LOGGER.exception("Unknown error connecting to Plex server: %s", error)
return self.async_abort(reason="unknown")
if errors:
if self._manual:
return await self.async_step_manual_setup(
user_input=server_config, errors=errors
)
return await self.async_step_user(errors=errors)
server_id = plex_server.machine_identifier
url = plex_server.url_in_use
token = server_config.get(CONF_TOKEN)
entry_config = {CONF_URL: url}
if self.client_id:
entry_config[CONF_CLIENT_ID] = self.client_id
if token:
entry_config[CONF_TOKEN] = token
if url.startswith("https"):
entry_config[CONF_VERIFY_SSL] = server_config.get(
CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
)
data = {
CONF_SERVER: plex_server.friendly_name,
CONF_SERVER_IDENTIFIER: server_id,
PLEX_SERVER_CONFIG: entry_config,
}
entry = await self.async_set_unique_id(server_id)
if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH:
self.hass.config_entries.async_update_entry(entry, data=data)
_LOGGER.debug("Updated config entry for %s", plex_server.friendly_name)
await self.hass.config_entries.async_reload(entry.entry_id)
return self.async_abort(reason="reauth_successful")
self._abort_if_unique_id_configured()
_LOGGER.debug("Valid config created for %s", plex_server.friendly_name)
return self.async_create_entry(title=plex_server.friendly_name, data=data)
async def async_step_select_server(self, user_input=None):
"""Use selected Plex server."""
config = dict(self.current_login)
if user_input is not None:
config[CONF_SERVER] = user_input[CONF_SERVER]
return await self.async_step_server_validate(config)
configured = configured_servers(self.hass)
available_servers = [
name
for (name, server_id) in self.available_servers
if server_id not in configured
]
if not available_servers:
return self.async_abort(reason="all_configured")
if len(available_servers) == 1:
config[CONF_SERVER] = available_servers[0]
return await self.async_step_server_validate(config)
return self.async_show_form(
step_id="select_server",
data_schema=vol.Schema(
{vol.Required(CONF_SERVER): vol.In(available_servers)}
),
errors={},
)
async def async_step_integration_discovery(self, discovery_info):
"""Handle GDM discovery."""
machine_identifier = discovery_info["data"]["Resource-Identifier"]
await self.async_set_unique_id(machine_identifier)
self._abort_if_unique_id_configured()
host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}"
name = discovery_info["data"]["Name"]
self.context["title_placeholders"] = {
"host": host,
"name": name,
}
return await self.async_step_user()
async def async_step_plex_website_auth(self):
"""Begin external auth flow on Plex website."""
self.hass.http.register_view(PlexAuthorizationCallbackView)
hass_url = get_url(self.hass)
headers = {"Origin": hass_url}
payload = {
"X-Plex-Device-Name": X_PLEX_DEVICE_NAME,
"X-Plex-Version": X_PLEX_VERSION,
"X-Plex-Product": X_PLEX_PRODUCT,
"X-Plex-Device": self.hass.config.location_name,
"X-Plex-Platform": X_PLEX_PLATFORM,
"X-Plex-Model": "Plex OAuth",
}
session = async_get_clientsession(self.hass)
self.plexauth = PlexAuth(payload, session, headers)
await self.plexauth.initiate_auth()
forward_url = f"{hass_url}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}"
auth_url = self.plexauth.auth_url(forward_url)
return self.async_external_step(step_id="obtain_token", url=auth_url)
async def async_step_obtain_token(self, user_input=None):
"""Obtain token after external auth completed."""
token = await self.plexauth.token(10)
if not token:
return self.async_external_step_done(next_step_id="timed_out")
self.token = token
self.client_id = self.plexauth.client_identifier
return self.async_external_step_done(next_step_id="use_external_token")
async def async_step_timed_out(self, user_input=None):
"""Abort flow when time expires."""
return self.async_abort(reason="token_request_timeout")
async def async_step_use_external_token(self, user_input=None):
"""Continue server validation with external token."""
server_config = {CONF_TOKEN: self.token}
return await self.async_step_server_validate(server_config)
async def async_step_reauth(self, data):
"""Handle a reauthorization flow request."""
self.current_login = dict(data)
return await self.async_step_user()
class PlexOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Plex options."""
def __init__(self, config_entry):
"""Initialize Plex options flow."""
self.options = copy.deepcopy(dict(config_entry.options))
self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
async def async_step_init(self, user_input=None):
"""Manage the Plex options."""
return await self.async_step_plex_mp_settings()
async def async_step_plex_mp_settings(self, user_input=None):
"""Manage the Plex media_player options."""
plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id]
if user_input is not None:
self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[
CONF_USE_EPISODE_ART
]
self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[
CONF_IGNORE_NEW_SHARED_USERS
]
self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[
CONF_IGNORE_PLEX_WEB_CLIENTS
]
account_data = {
user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])}
for user in plex_server.accounts
}
self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data
return self.async_create_entry(title="", data=self.options)
available_accounts = {name: name for name in plex_server.accounts}
available_accounts[plex_server.owner] += " [Owner]"
default_accounts = plex_server.accounts
known_accounts = set(plex_server.option_monitored_users)
if known_accounts:
default_accounts = {
user
for user in plex_server.option_monitored_users
if plex_server.option_monitored_users[user]["enabled"]
}
for user in plex_server.accounts:
if user not in known_accounts:
available_accounts[user] += " [New]"
if not plex_server.option_ignore_new_shared_users:
for new_user in plex_server.accounts - known_accounts:
default_accounts.add(new_user)
return self.async_show_form(
step_id="plex_mp_settings",
data_schema=vol.Schema(
{
vol.Required(
CONF_USE_EPISODE_ART,
default=plex_server.option_use_episode_art,
): bool,
vol.Optional(
CONF_MONITORED_USERS, default=default_accounts
): cv.multi_select(available_accounts),
vol.Required(
CONF_IGNORE_NEW_SHARED_USERS,
default=plex_server.option_ignore_new_shared_users,
): bool,
vol.Required(
CONF_IGNORE_PLEX_WEB_CLIENTS,
default=plex_server.option_ignore_plexweb_clients,
): bool,
}
),
)
class PlexAuthorizationCallbackView(HomeAssistantView):
"""Handle callback from external auth."""
url = AUTH_CALLBACK_PATH
name = AUTH_CALLBACK_NAME
requires_auth = False
async def get(self, request):
"""Receive authorization confirmation."""
hass = request.app["hass"]
await hass.config_entries.flow.async_configure(
flow_id=request.query["flow_id"], user_input=None
)
return web_response.Response(
headers={"content-type": "text/html"},
text="<script>window.close()</script>Success! This window can be closed",
)
|
apache-2.0
| -7,659,936,257,255,139,000 | 1,519,775,652,993,261,800 | 36.36215 | 85 | 0.598837 | false |
gangadharkadam/v6_frappe
|
frappe/api.py
|
27
|
3521
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import json
import frappe
import frappe.handler
import frappe.client
import frappe.desk.reportview
from frappe.utils.response import build_response
from frappe import _
def handle():
"""
Handler for `/api` methods
### Examples:
`/api/method/{methodname}` will call a whitelisted method
`/api/resource/{doctype}` will query a table
examples:
- `?fields=["name", "owner"]`
- `?filters=[["Task", "name", "like", "%005"]]`
- `?limit_start=0`
- `?limit_page_length=20`
`/api/resource/{doctype}/{name}` will point to a resource
`GET` will return doclist
`POST` will insert
`PUT` will update
`DELETE` will delete
`/api/resource/{doctype}/{name}?run_method={method}` will run a whitelisted controller method
"""
parts = frappe.request.path[1:].split("/",3)
call = doctype = name = None
if len(parts) > 1:
call = parts[1]
if len(parts) > 2:
doctype = parts[2]
if len(parts) > 3:
name = parts[3]
if call=="method":
frappe.local.form_dict.cmd = doctype
return frappe.handler.handle()
elif call=="resource":
if "run_method" in frappe.local.form_dict:
method = frappe.local.form_dict.pop("run_method")
doc = frappe.get_doc(doctype, name)
doc.is_whitelisted(method)
if frappe.local.request.method=="GET":
if not doc.has_permission("read"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
if frappe.local.request.method=="POST":
if not doc.has_permission("write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
frappe.local.response.update({"data": doc.run_method(method, **frappe.local.form_dict)})
frappe.db.commit()
else:
if name:
if frappe.local.request.method=="GET":
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
frappe.local.response.update({"data": doc})
if frappe.local.request.method=="PUT":
data = json.loads(frappe.local.form_dict.data)
doc = frappe.get_doc(doctype, name)
if "flags" in data:
del data["flags"]
# Not checking permissions here because it's checked in doc.save
doc.update(data)
frappe.local.response.update({
"data": doc.save().as_dict()
})
frappe.db.commit()
if frappe.local.request.method=="DELETE":
# Not checking permissions here because it's checked in delete_doc
frappe.delete_doc(doctype, name)
frappe.local.response.http_status_code = 202
frappe.local.response.message = "ok"
frappe.db.commit()
elif doctype:
if frappe.local.request.method=="GET":
if frappe.local.form_dict.get('fields'):
frappe.local.form_dict['fields'] = json.loads(frappe.local.form_dict['fields'])
frappe.local.form_dict.setdefault('limit_page_length', 20)
frappe.local.response.update({
"data": frappe.call(frappe.client.get_list,
doctype, **frappe.local.form_dict)})
if frappe.local.request.method=="POST":
data = json.loads(frappe.local.form_dict.data)
data.update({
"doctype": doctype
})
frappe.local.response.update({
"data": frappe.get_doc(data).insert().as_dict()
})
frappe.db.commit()
else:
raise frappe.DoesNotExistError
else:
raise frappe.DoesNotExistError
return build_response("json")
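# Client-side sketch (illustrative, not part of this module): exercising the
# endpoints documented above with the third-party `requests` library; the
# host, credentials and doctype below are assumptions.
#
#   import requests
#   s = requests.Session()
#   s.post("https://example.com/api/method/login",
#          data={"usr": "user@example.com", "pwd": "secret"})
#   todos = s.get("https://example.com/api/resource/ToDo",
#                 params={"limit_page_length": 5}).json()["data"]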
|
mit
| -8,557,977,985,627,721,000 | 2,964,930,582,259,100,700 | 27.168 | 94 | 0.66856 | false |
vtapia/sssd
|
src/tests/python-test.py
|
4
|
16953
|
#!/usr/bin/env python
# coding=utf-8
# Authors:
# Jakub Hrozek <[email protected]>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import tempfile
import shutil
import unittest
import subprocess
import errno
# module under test
import pysss
class LocalTest(unittest.TestCase):
local_path = "/var/lib/sss/db/sssd.ldb"
def setUp(self):
self.local = pysss.local()
def _run_and_check(self, runme):
        # subprocess.call() returns just the exit status, not (status, output)
        status = subprocess.call(runme, shell=True)
        self.failUnlessEqual(status, 0, "command failed: %s" % runme)
def _get_object_info(self, name, subtree, domain):
search_dn = "dn=name=%s,cn=%s,cn=%s,cn=sysdb" % (name, subtree, domain)
try:
cmd = "ldbsearch -H %s %s" % (self.local_path, search_dn)
            # check_output() (not check_call) captures the command's stdout
            output = subprocess.check_output(cmd, shell=True)
            output = output.decode('utf-8')
except subprocess.CalledProcessError:
return {}
kw = {}
for key, value in \
[l.split(':') for l in output.split('\n') if ":" in l]:
kw[key] = value.strip()
del kw['asq']
return kw
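    # The parsing above expects "attribute: value" lines as emitted by
    # ldbsearch, e.g. (illustrative output; the attribute names depend on
    # the sysdb schema):
    #   dn: name=foo,cn=users,cn=LOCAL,cn=sysdb
    #   uidNumber: 1024
    #   homeDirectory: /home/foo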
def get_user_info(self, name, domain="LOCAL"):
return self._get_object_info(name, "users", domain)
def get_group_info(self, name, domain="LOCAL"):
return self._get_object_info(name, "groups", domain)
def _validate_object(self, kw, name, **kwargs):
if kw == {}:
self.fail("Could not get %s info" % name)
for key in kwargs.keys():
self.assert_(str(kwargs[key]) == str(kw[key]),
"%s %s != %s %s" % (key, kwargs[key], key, kw[key]))
def validate_user(self, username, **kwargs):
return self._validate_object(self.get_user_info(username), "user",
**kwargs)
def validate_group(self, groupname, **kwargs):
return self._validate_object(self.get_group_info(groupname), "group",
**kwargs)
def _validate_no_object(self, kw, name):
if kw != {}:
self.fail("Got %s info" % name)
def validate_no_user(self, username):
return self._validate_no_object(self.get_user_info(username), "user")
def validate_no_group(self, groupname):
return self._validate_no_object(self.get_group_info(groupname),
"group")
def _get_object_membership(self, name, subtree, domain):
search_dn = "dn=name=%s,cn=%s,cn=%s,cn=sysdb" % (name, subtree, domain)
try:
cmd = "ldbsearch -H %s %s" % (self.local_path, search_dn)
            # check_output() (not check_call) captures the command's stdout
            output = subprocess.check_output(cmd, shell=True)
            output = output.decode('utf-8')
except subprocess.CalledProcessError:
return []
members = [value.strip() for key, value in
[l.split(':') for l in output.split('\n') if ":" in l]
if key == "memberof"]
return members
def _assertMembership(self, name, group_list, subtree, domain):
members = self._get_object_membership(name, subtree, domain)
for group in group_list:
group_dn = "name=%s,cn=groups,cn=%s,cn=sysdb" % (group, domain)
if group_dn in members:
members.remove(group_dn)
else:
self.fail("Cannot find required group %s" % group_dn)
if len(members) > 0:
self.fail("More groups than selected")
def assertUserMembership(self, name, group_list, domain="LOCAL"):
return self._assertMembership(name, group_list, "users", domain)
def assertGroupMembership(self, name, group_list, domain="LOCAL"):
return self._assertMembership(name, group_list, "groups", domain)
def get_user_membership(self, name, domain="LOCAL"):
return self._get_object_membership(name, "users", domain)
def get_group_membership(self, name, domain="LOCAL"):
return self._get_object_membership(name, "groups", domain)
def add_group(self, groupname):
self._run_and_check("sss_groupadd %s" % (groupname))
def remove_group(self, groupname):
self._run_and_check("sss_groupdel %s" % (groupname))
def add_user(self, username):
self._run_and_check("sss_useradd %s" % (username))
def add_user_not_home(self, username):
self._run_and_check("sss_useradd -M %s" % (username))
def remove_user(self, username):
self._run_and_check("sss_userdel %s" % (username))
def remove_user_not_home(self, username):
self._run_and_check("sss_userdel -R %s" % (username))
class SanityTest(unittest.TestCase):
def testInstantiate(self):
"Test that the local backed binding can be instantiated"
local = pysss.local()
self.assert_(local.__class__, "<type 'sss.local'>")
class UseraddTest(LocalTest):
def tearDown(self):
if self.username:
self.remove_user(self.username)
def testUseradd(self):
"Test adding a local user"
self.username = "testUseradd"
self.local.useradd(self.username)
self.validate_user(self.username)
# check home directory was created with default name
self.assertEquals(os.access("/home/%s" % self.username, os.F_OK), True)
def testUseraddWithParams(self):
"Test adding a local user with modified parameters"
self.username = "testUseraddWithParams"
self.local.useradd(self.username,
gecos="foo bar",
homedir="/home/foobar",
shell="/bin/zsh")
self.validate_user(self.username,
gecos="foo bar",
homeDirectory="/home/foobar",
loginShell="/bin/zsh")
# check home directory was created with nondefault name
self.assertEquals(os.access("/home/foobar", os.F_OK), True)
def testUseraddNoHomedir(self):
"Test adding a local user without creating his home dir"
self.username = "testUseraddNoHomedir"
self.local.useradd(self.username, create_home=False)
self.validate_user(self.username)
# check home directory was not created
username_path = "/home/%s" % self.username
self.assertEquals(os.access(username_path, os.F_OK), False)
self.local.userdel(self.username, remove=False)
self.username = None # fool tearDown into not removing the user
def testUseraddAlternateSkeldir(self):
"Test adding a local user and init his homedir from a custom location"
self.username = "testUseraddAlternateSkeldir"
skeldir = tempfile.mkdtemp()
fd, path = tempfile.mkstemp(dir=skeldir)
fdo = os.fdopen(fd)
fdo.flush()
        fdo.close()
self.assertEquals(os.access(path, os.F_OK), True)
filename = os.path.basename(path)
try:
self.local.useradd(self.username, skel=skeldir)
self.validate_user(self.username)
path = "/home/%s/%s" % (self.username, filename)
self.assertEquals(os.access(path, os.F_OK), True)
finally:
shutil.rmtree(skeldir)
def testUseraddToGroups(self):
"Test adding a local user with group membership"
self.username = "testUseraddToGroups"
self.add_group("gr1")
self.add_group("gr2")
try:
self.local.useradd(self.username,
groups=["gr1", "gr2"])
self.assertUserMembership(self.username,
["gr1", "gr2"])
finally:
self.remove_group("gr1")
self.remove_group("gr2")
def testUseraddWithUID(self):
"Test adding a local user with a custom UID"
self.username = "testUseraddWithUID"
self.local.useradd(self.username,
uid=1024)
self.validate_user(self.username,
uidNumber=1024)
class UseraddTestNegative(LocalTest):
def testUseraddNoParams(self):
"Test that local.useradd() requires the username parameter"
self.assertRaises(TypeError, self.local.useradd)
def testUseraddUserAlreadyExists(self):
"Test adding a local with a duplicate name"
self.username = "testUseraddUserAlreadyExists"
self.local.useradd(self.username)
try:
self.local.useradd(self.username)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_user(self.username)
def testUseraddUIDAlreadyExists(self):
"Test adding a local with a duplicate user ID"
self.username = "testUseraddUIDAlreadyExists1"
self.local.useradd(self.username, uid=1025)
try:
self.local.useradd("testUseraddUIDAlreadyExists2", uid=1025)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_user(self.username)
class UserdelTest(LocalTest):
def testUserdel(self):
self.add_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), True)
self.validate_user("testUserdel")
self.local.userdel("testUserdel")
self.validate_no_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), False)
def testUserdelNotHomedir(self):
self.add_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), True)
self.validate_user("testUserdel")
self.local.userdel("testUserdel", remove=False)
self.validate_no_user("testUserdel")
self.assertEquals(os.access("/home/testUserdel", os.F_OK), True)
shutil.rmtree("/home/testUserdel")
os.remove("/var/mail/testUserdel")
def testUserdelNegative(self):
self.validate_no_user("testUserdelNegative")
try:
self.local.userdel("testUserdelNegative")
except IOError as e:
self.assertEquals(e.errno, errno.ENOENT)
else:
fail("Was expecting exception")
class UsermodTest(LocalTest):
def setUp(self):
self.local = pysss.local()
self.username = "UsermodTest"
self.add_user_not_home(self.username)
def tearDown(self):
self.remove_user_not_home(self.username)
def testUsermod(self):
"Test modifying user attributes"
self.local.usermod(self.username,
gecos="foo bar",
homedir="/home/foobar",
shell="/bin/zsh")
self.validate_user(self.username,
gecos="foo bar",
homeDirectory="/home/foobar",
loginShell="/bin/zsh")
def testUsermodUID(self):
"Test modifying UID"
self.local.usermod(self.username,
uid=1024)
self.validate_user(self.username,
uidNumber=1024)
def testUsermodGroupMembership(self):
"Test adding to and removing from groups"
self.add_group("gr1")
self.add_group("gr2")
try:
self.local.usermod(self.username,
addgroups=["gr1", "gr2"])
self.assertUserMembership(self.username,
["gr1", "gr2"])
self.local.usermod(self.username,
rmgroups=["gr2"])
self.assertUserMembership(self.username,
["gr1"])
self.local.usermod(self.username,
rmgroups=["gr1"])
self.assertUserMembership(self.username,
[])
finally:
self.remove_group("gr1")
self.remove_group("gr2")
def testUsermodLockUnlock(self):
"Test locking and unlocking user"
self.local.usermod(self.username,
lock=self.local.lock)
self.validate_user(self.username,
disabled="true")
self.local.usermod(self.username,
lock=self.local.unlock)
self.validate_user(self.username,
disabled="false")
class GroupaddTest(LocalTest):
def tearDown(self):
if self.groupname:
self.remove_group(self.groupname)
def testGroupadd(self):
"Test adding a local group"
self.groupname = "testGroupadd"
self.local.groupadd(self.groupname)
self.validate_group(self.groupname)
def testGroupaddWithGID(self):
"Test adding a local group with a custom GID"
self.groupname = "testUseraddWithGID"
self.local.groupadd(self.groupname,
gid=1024)
self.validate_group(self.groupname,
gidNumber=1024)
class GroupaddTestNegative(LocalTest):
def testGroupaddNoParams(self):
"Test that local.groupadd() requires the groupname parameter"
self.assertRaises(TypeError, self.local.groupadd)
def testGroupaddUserAlreadyExists(self):
"Test adding a local with a duplicate name"
self.groupname = "testGroupaddUserAlreadyExists"
self.local.groupadd(self.groupname)
try:
self.local.groupadd(self.groupname)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_group(self.groupname)
def testGroupaddGIDAlreadyExists(self):
"Test adding a local with a duplicate group ID"
self.groupname = "testGroupaddGIDAlreadyExists1"
self.local.groupadd(self.groupname, gid=1025)
try:
self.local.groupadd("testGroupaddGIDAlreadyExists2", gid=1025)
except IOError as e:
self.assertEquals(e.errno, errno.EEXIST)
else:
self.fail("Was expecting exception")
finally:
self.remove_group(self.groupname)
class GroupdelTest(LocalTest):
def testGroupdel(self):
self.add_group("testGroupdel")
self.validate_group("testGroupdel")
self.local.groupdel("testGroupdel")
self.validate_no_group("testGroupdel")
def testGroupdelNegative(self):
self.validate_no_group("testGroupdelNegative")
try:
self.local.groupdel("testGroupdelNegative")
except IOError as e:
self.assertEquals(e.errno, errno.ENOENT)
else:
fail("Was expecting exception")
class GroupmodTest(LocalTest):
def setUp(self):
self.local = pysss.local()
self.groupname = "GroupmodTest"
self.add_group(self.groupname)
def tearDown(self):
self.remove_group(self.groupname)
def testGroupmodGID(self):
"Test modifying UID"
self.local.groupmod(self.groupname,
gid=1024)
self.validate_group(self.groupname,
gidNumber=1024)
def testGroupmodGroupMembership(self):
"Test adding to groups"
self.add_group("gr1")
self.add_group("gr2")
try:
self.local.groupmod(self.groupname,
addgroups=["gr1", "gr2"])
self.assertGroupMembership(self.groupname,
["gr1", "gr2"])
self.local.groupmod(self.groupname,
rmgroups=["gr2"])
self.assertGroupMembership(self.groupname,
["gr1"])
self.local.groupmod(self.groupname,
rmgroups=["gr1"])
self.assertGroupMembership(self.groupname,
[])
finally:
self.remove_group("gr1")
self.remove_group("gr2")
# -------------- run the test suite -------------- #
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| -9,133,285,064,867,367,000 | 6,541,245,615,821,203,000 | 35.224359 | 79 | 0.584557 | false |
ericbaze/continuum_code_2012
|
pydata/moin/pythonenv/local/lib/python2.7/site-packages/pip-1.1-py2.7.egg/pip/commands/search.py
|
60
|
4523
|
import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
name = 'search'
usage = '%prog QUERY'
summary = 'Search PyPI'
def __init__(self):
super(SearchCommand, self).__init__()
self.parser.add_option(
'--index',
dest='index',
metavar='URL',
default='http://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
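# Illustrative only: a PyPI hit list such as
#   [{'name': 'pip', 'summary': 'old', 'version': '0.8', '_pypi_ordering': 1},
#    {'name': 'pip', 'summary': 'new', 'version': '1.1', '_pypi_ordering': 2}]
# collapses to one record whose 'versions' is ['0.8', '1.1'] and whose
# summary and score come from the highest version.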
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
pass
try:
return cmp(LooseVersion(version1), LooseVersion(version2))
except TypeError:
# certain LooseVersion comparions raise due to unorderable types,
# fallback to string comparison
return cmp([str(v) for v in LooseVersion(version1).version],
[str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
SearchCommand()
|
gpl-2.0
| 909,014,557,687,459,500 | 7,353,416,486,360,129,000 | 34.614173 | 102 | 0.606898 | false |
dagwieers/ansible
|
lib/ansible/modules/network/cloudengine/ce_mlag_interface.py
|
31
|
35442
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_mlag_interface
version_added: "2.4"
short_description: Manages MLAG interfaces on HUAWEI CloudEngine switches.
description:
- Manages MLAG interface attributes on HUAWEI CloudEngine switches.
author:
- Li Yanfeng (@QijunPan)
options:
eth_trunk_id:
description:
- Name of the local M-LAG interface. The value is ranging from 0 to 511.
dfs_group_id:
description:
            - ID of a DFS group. The value is 1.
mlag_id:
description:
- ID of the M-LAG. The value is an integer that ranges from 1 to 2048.
mlag_system_id:
description:
- M-LAG global LACP system MAC address. The value is a string of 0 to 255 characters. The default value
is the MAC address of the Ethernet port of MPU.
mlag_priority_id:
description:
- M-LAG global LACP system priority. The value is an integer ranging from 0 to 65535.
The default value is 32768.
interface:
description:
- Name of the interface that enters the Error-Down state when the peer-link fails.
The value is a string of 1 to 63 characters.
mlag_error_down:
description:
- Configure the interface on the slave device to enter the Error-Down state.
choices: ['enable','disable']
state:
description:
- Specify desired state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: mlag interface module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Set interface mlag error down
ce_mlag_interface:
interface: 10GE2/0/1
mlag_error_down: enable
provider: "{{ cli }}"
- name: Create mlag
ce_mlag_interface:
eth_trunk_id: 1
dfs_group_id: 1
mlag_id: 4
provider: "{{ cli }}"
- name: Set mlag global attribute
ce_mlag_interface:
mlag_system_id: 0020-1409-0407
mlag_priority_id: 5
provider: "{{ cli }}"
- name: Set mlag interface attribute
ce_mlag_interface:
eth_trunk_id: 1
mlag_system_id: 0020-1409-0400
mlag_priority_id: 3
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: { "interface": "eth-trunk1",
"mlag_error_down": "disable",
"state": "present"
}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: { "mlagErrorDownInfos": [
{
"dfsgroupId": "1",
"portName": "Eth-Trunk1"
}
]
}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {}
updates:
description: command sent to the device
returned: always
type: list
sample: { "interface eth-trunk1",
"undo m-lag unpaired-port suspend"}
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import load_config
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_MLAG_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance>
</mlagInstance>
</mlagInstances>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="create">
<dfsgroupId>%s</dfsgroupId>
<mlagId>%s</mlagId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<mlagInstances>
<mlagInstance operation="delete">
<dfsgroupId>%s</dfsgroupId>
<mlagId>%s</mlagId>
<localMlagPort>%s</localMlagPort>
</mlagInstance>
</mlagInstances>
</mlag>
</config>
"""
CE_NC_GET_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</filter>
"""
CE_NC_SET_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>%s</ifName>
<lacpMlagIf operation="merge">
"""
CE_NC_SET_LACP_MLAG_INFO_TAIL = """
</lacpMlagIf>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</config>
"""
CE_NC_GET_GLOBAL_LACP_MLAG_INFO = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal>
<lacpMlagSysId></lacpMlagSysId>
<lacpMlagPriority></lacpMlagPriority>
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</filter>
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<lacpSysInfo>
<lacpMlagGlobal operation="merge">
"""
CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL = """
</lacpMlagGlobal>
</lacpSysInfo>
</ifmtrunk>
</config>
"""
CE_NC_GET_MLAG_ERROR_DOWN_INFO = """
<filter type="subtree">
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown>
<dfsgroupId></dfsgroupId>
<portName></portName>
<portState></portState>
</errordown>
</errordowns>
</mlag>
</filter>
"""
CE_NC_CREATE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="create">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
CE_NC_DELETE_MLAG_ERROR_DOWN_INFO = """
<config>
<mlag xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<errordowns>
<errordown operation="delete">
<dfsgroupId>1</dfsgroupId>
<portName>%s</portName>
</errordown>
</errordowns>
</mlag>
</config>
"""
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
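# For example (illustrative): get_interface_type('10GE2/0/1') returns '10ge',
# get_interface_type('Eth-Trunk20') returns 'eth-trunk', and an unrecognized
# name such as 'Vlanif100' returns None.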
class MlagInterface(object):
"""
Manages Manages MLAG interface information.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# module input info
self.eth_trunk_id = self.module.params['eth_trunk_id']
self.dfs_group_id = self.module.params['dfs_group_id']
self.mlag_id = self.module.params['mlag_id']
self.mlag_system_id = self.module.params['mlag_system_id']
self.mlag_priority_id = self.module.params['mlag_priority_id']
self.interface = self.module.params['interface']
self.mlag_error_down = self.module.params['mlag_error_down']
self.state = self.module.params['state']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.existing = dict()
self.proposed = dict()
self.end_state = dict()
# mlag info
self.commands = list()
self.mlag_info = None
self.mlag_global_info = None
self.mlag_error_down_info = None
self.mlag_trunk_attribute_info = None
def init_module(self):
""" init module """
self.module = AnsibleModule(
argument_spec=self.spec, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def cli_add_command(self, command, undo=False):
"""add command to self.update_cmd and self.commands"""
if undo and command.lower() not in ["quit", "return"]:
cmd = "undo " + command
else:
cmd = command
self.commands.append(cmd) # set to device
if command.lower() not in ["quit", "return"]:
self.updates_cmd.append(cmd) # show updates result
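    # Example (added note, not in the original module): calling
    # cli_add_command('lacp m-lag priority 100', undo=True) queues
    # 'undo lacp m-lag priority 100' both for the device and for the
    # reported updates, while 'quit'/'return' are sent to the device but
    # kept out of the updates.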
def cli_load_config(self, commands):
"""load config by cli"""
if not self.module.check_mode:
load_config(self.module, commands)
def get_mlag_info(self):
""" get mlag info."""
mlag_info = dict()
conf_str = CE_NC_GET_MLAG_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
mlag_info["mlagInfos"] = list()
root = ElementTree.fromstring(xml_str)
dfs_mlag_infos = root.findall(
"data/mlag/mlagInstances/mlagInstance")
if dfs_mlag_infos:
for dfs_mlag_info in dfs_mlag_infos:
mlag_dict = dict()
for ele in dfs_mlag_info:
if ele.tag in ["dfsgroupId", "mlagId", "localMlagPort"]:
mlag_dict[ele.tag] = ele.text
mlag_info["mlagInfos"].append(mlag_dict)
return mlag_info
def get_mlag_global_info(self):
""" get mlag global info."""
mlag_global_info = dict()
conf_str = CE_NC_GET_GLOBAL_LACP_MLAG_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_global_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
global_info = root.findall(
"data/ifmtrunk/lacpSysInfo/lacpMlagGlobal")
if global_info:
for tmp in global_info:
for site in tmp:
if site.tag in ["lacpMlagSysId", "lacpMlagPriority"]:
mlag_global_info[site.tag] = site.text
return mlag_global_info
def get_mlag_trunk_attribute_info(self):
""" get mlag global info."""
mlag_trunk_attribute_info = dict()
eth_trunk = "Eth-Trunk"
eth_trunk += self.eth_trunk_id
conf_str = CE_NC_GET_LACP_MLAG_INFO % eth_trunk
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_trunk_attribute_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
global_info = root.findall(
"data/ifmtrunk/TrunkIfs/TrunkIf/lacpMlagIf")
if global_info:
for tmp in global_info:
for site in tmp:
if site.tag in ["lacpMlagSysId", "lacpMlagPriority"]:
mlag_trunk_attribute_info[site.tag] = site.text
return mlag_trunk_attribute_info
def get_mlag_error_down_info(self):
""" get error down info."""
mlag_error_down_info = dict()
conf_str = CE_NC_GET_MLAG_ERROR_DOWN_INFO
xml_str = get_nc_config(self.module, conf_str)
if "<data/>" in xml_str:
return mlag_error_down_info
else:
xml_str = xml_str.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
mlag_error_down_info["mlagErrorDownInfos"] = list()
root = ElementTree.fromstring(xml_str)
mlag_error_infos = root.findall(
"data/mlag/errordowns/errordown")
if mlag_error_infos:
for mlag_error_info in mlag_error_infos:
mlag_error_dict = dict()
for ele in mlag_error_info:
if ele.tag in ["dfsgroupId", "portName"]:
mlag_error_dict[ele.tag] = ele.text
mlag_error_down_info[
"mlagErrorDownInfos"].append(mlag_error_dict)
return mlag_error_down_info
def check_macaddr(self):
"""check mac-address whether valid"""
valid_char = '0123456789abcdef-'
mac = self.mlag_system_id
if len(mac) > 16:
return False
mac_list = re.findall(r'([0-9a-fA-F]+)', mac)
if len(mac_list) != 3:
return False
if mac.count('-') != 2:
return False
        for value in mac:
if value.lower() not in valid_char:
return False
return True
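    # Illustrative values (added note, not in the original module): the
    # expected system-id format is three hyphen-separated hex groups, e.g.
    # '0025-9e95-7c31' passes the check above, while '00259e957c31' (no
    # hyphens) and '0025-9e95' (only two groups) both fail.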
def check_params(self):
"""Check all input params"""
# eth_trunk_id check
if self.eth_trunk_id:
if not self.eth_trunk_id.isdigit():
self.module.fail_json(
                    msg='Error: The value of eth_trunk_id must be an integer.')
if int(self.eth_trunk_id) < 0 or int(self.eth_trunk_id) > 511:
self.module.fail_json(
msg='Error: The value of eth_trunk_id is not in the range from 0 to 511.')
# dfs_group_id check
if self.dfs_group_id:
if self.dfs_group_id != "1":
self.module.fail_json(
msg='Error: The value of dfs_group_id must be 1.')
# mlag_id check
if self.mlag_id:
if not self.mlag_id.isdigit():
self.module.fail_json(
                    msg='Error: The value of mlag_id must be an integer.')
if int(self.mlag_id) < 1 or int(self.mlag_id) > 2048:
self.module.fail_json(
msg='Error: The value of mlag_id is not in the range from 1 to 2048.')
# mlag_system_id check
if self.mlag_system_id:
if not self.check_macaddr():
self.module.fail_json(
msg="Error: mlag_system_id has invalid value %s." % self.mlag_system_id)
# mlag_priority_id check
if self.mlag_priority_id:
if not self.mlag_priority_id.isdigit():
self.module.fail_json(
                    msg='Error: The value of mlag_priority_id must be an integer.')
if int(self.mlag_priority_id) < 0 or int(self.mlag_priority_id) > 254:
self.module.fail_json(
msg='Error: The value of mlag_priority_id is not in the range from 0 to 254.')
# interface check
if self.interface:
intf_type = get_interface_type(self.interface)
if not intf_type:
                self.module.fail_json(
                    msg='Error: Interface name %s is invalid.' % self.interface)
def is_mlag_info_change(self):
"""whether mlag info change"""
if not self.mlag_info:
return True
eth_trunk = "Eth-Trunk"
eth_trunk += self.eth_trunk_id
for info in self.mlag_info["mlagInfos"]:
if info["mlagId"] == self.mlag_id and info["localMlagPort"] == eth_trunk:
return False
return True
def is_mlag_info_exist(self):
"""whether mlag info exist"""
if not self.mlag_info:
return False
eth_trunk = "Eth-Trunk"
eth_trunk += self.eth_trunk_id
for info in self.mlag_info["mlagInfos"]:
if info["mlagId"] == self.mlag_id and info["localMlagPort"] == eth_trunk:
return True
return False
def is_mlag_error_down_info_change(self):
"""whether mlag error down info change"""
if not self.mlag_error_down_info:
return True
for info in self.mlag_error_down_info["mlagErrorDownInfos"]:
if info["portName"].upper() == self.interface.upper():
return False
return True
def is_mlag_error_down_info_exist(self):
"""whether mlag error down info exist"""
if not self.mlag_error_down_info:
return False
for info in self.mlag_error_down_info["mlagErrorDownInfos"]:
if info["portName"].upper() == self.interface.upper():
return True
return False
def is_mlag_interface_info_change(self):
"""whether mlag interface attribute info change"""
if not self.mlag_trunk_attribute_info:
return True
if self.mlag_system_id:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] != self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_trunk_attribute_info["lacpMlagPriority"] != self.mlag_priority_id:
return True
return False
def is_mlag_interface_info_exist(self):
"""whether mlag interface attribute info exist"""
if not self.mlag_trunk_attribute_info:
return False
if self.mlag_system_id:
if self.mlag_priority_id:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_system_id:
if self.mlag_trunk_attribute_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_trunk_attribute_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
return False
def is_mlag_global_info_change(self):
"""whether mlag global attribute info change"""
if not self.mlag_global_info:
return True
if self.mlag_system_id:
if self.mlag_global_info["lacpMlagSysId"] != self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_global_info["lacpMlagPriority"] != self.mlag_priority_id:
return True
return False
def is_mlag_global_info_exist(self):
"""whether mlag global attribute info exist"""
if not self.mlag_global_info:
return False
if self.mlag_system_id:
if self.mlag_priority_id:
if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id:
return True
if self.mlag_priority_id:
if self.mlag_system_id:
if self.mlag_global_info["lacpMlagSysId"] == self.mlag_system_id \
and self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
else:
if self.mlag_global_info["lacpMlagPriority"] == self.mlag_priority_id:
return True
return False
def create_mlag(self):
"""create mlag info"""
if self.is_mlag_info_change():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
conf_str = CE_NC_CREATE_MLAG_INFO % (
self.dfs_group_id, self.mlag_id, mlag_port)
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: create mlag info failed.')
self.updates_cmd.append("interface %s" % mlag_port)
self.updates_cmd.append("dfs-group %s m-lag %s" %
(self.dfs_group_id, self.mlag_id))
self.changed = True
def delete_mlag(self):
"""delete mlag info"""
if self.is_mlag_info_exist():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
conf_str = CE_NC_DELETE_MLAG_INFO % (
self.dfs_group_id, self.mlag_id, mlag_port)
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: delete mlag info failed.')
self.updates_cmd.append("interface %s" % mlag_port)
self.updates_cmd.append(
"undo dfs-group %s m-lag %s" % (self.dfs_group_id, self.mlag_id))
self.changed = True
def create_mlag_error_down(self):
"""create mlag error down info"""
if self.is_mlag_error_down_info_change():
conf_str = CE_NC_CREATE_MLAG_ERROR_DOWN_INFO % self.interface
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: create mlag error down info failed.')
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("m-lag unpaired-port suspend")
self.changed = True
def delete_mlag_error_down(self):
"""delete mlag error down info"""
if self.is_mlag_error_down_info_exist():
conf_str = CE_NC_DELETE_MLAG_ERROR_DOWN_INFO % self.interface
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
msg='Error: delete mlag error down info failed.')
self.updates_cmd.append("interface %s" % self.interface)
self.updates_cmd.append("undo m-lag unpaired-port suspend")
self.changed = True
def set_mlag_interface(self):
"""set mlag interface atrribute info"""
if self.is_mlag_interface_info_change():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
conf_str = CE_NC_SET_LACP_MLAG_INFO_HEAD % mlag_port
if self.mlag_priority_id:
conf_str += "<lacpMlagPriority>%s</lacpMlagPriority>" % self.mlag_priority_id
if self.mlag_system_id:
conf_str += "<lacpMlagSysId>%s</lacpMlagSysId>" % self.mlag_system_id
conf_str += CE_NC_SET_LACP_MLAG_INFO_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set mlag interface attribute info failed.')
self.updates_cmd.append("interface %s" % mlag_port)
if self.mlag_priority_id:
self.updates_cmd.append(
"lacp m-lag priority %s" % self.mlag_priority_id)
if self.mlag_system_id:
self.updates_cmd.append(
"lacp m-lag system-id %s" % self.mlag_system_id)
self.changed = True
def delete_mlag_interface(self):
"""delete mlag interface attribute info"""
if self.is_mlag_interface_info_exist():
mlag_port = "Eth-Trunk"
mlag_port += self.eth_trunk_id
cmd = "interface %s" % mlag_port
self.cli_add_command(cmd)
if self.mlag_priority_id:
cmd = "lacp m-lag priority %s" % self.mlag_priority_id
self.cli_add_command(cmd, True)
if self.mlag_system_id:
cmd = "lacp m-lag system-id %s" % self.mlag_system_id
self.cli_add_command(cmd, True)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def set_mlag_global(self):
"""set mlag global attribute info"""
if self.is_mlag_global_info_change():
conf_str = CE_NC_SET_GLOBAL_LACP_MLAG_INFO_HEAD
if self.mlag_priority_id:
conf_str += "<lacpMlagPriority>%s</lacpMlagPriority>" % self.mlag_priority_id
if self.mlag_system_id:
conf_str += "<lacpMlagSysId>%s</lacpMlagSysId>" % self.mlag_system_id
conf_str += CE_NC_SET_GLOBAL_LACP_MLAG_INFO_TAIL
recv_xml = set_nc_config(self.module, conf_str)
if "<ok/>" not in recv_xml:
self.module.fail_json(
                    msg='Error: set mlag global attribute info failed.')
if self.mlag_priority_id:
self.updates_cmd.append(
"lacp m-lag priority %s" % self.mlag_priority_id)
if self.mlag_system_id:
self.updates_cmd.append(
"lacp m-lag system-id %s" % self.mlag_system_id)
self.changed = True
def delete_mlag_global(self):
"""delete mlag global attribute info"""
if self.is_mlag_global_info_exist():
if self.mlag_priority_id:
cmd = "lacp m-lag priority %s" % self.mlag_priority_id
self.cli_add_command(cmd, True)
if self.mlag_system_id:
cmd = "lacp m-lag system-id %s" % self.mlag_system_id
self.cli_add_command(cmd, True)
if self.commands:
self.cli_load_config(self.commands)
self.changed = True
def get_proposed(self):
"""get proposed info"""
if self.eth_trunk_id:
self.proposed["eth_trunk_id"] = self.eth_trunk_id
if self.dfs_group_id:
self.proposed["dfs_group_id"] = self.dfs_group_id
if self.mlag_id:
self.proposed["mlag_id"] = self.mlag_id
if self.mlag_system_id:
self.proposed["mlag_system_id"] = self.mlag_system_id
if self.mlag_priority_id:
self.proposed["mlag_priority_id"] = self.mlag_priority_id
if self.interface:
self.proposed["interface"] = self.interface
if self.mlag_error_down:
self.proposed["mlag_error_down"] = self.mlag_error_down
if self.state:
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
self.mlag_info = self.get_mlag_info()
self.mlag_global_info = self.get_mlag_global_info()
self.mlag_error_down_info = self.get_mlag_error_down_info()
if self.eth_trunk_id or self.dfs_group_id or self.mlag_id:
if not self.mlag_system_id and not self.mlag_priority_id:
if self.mlag_info:
self.existing["mlagInfos"] = self.mlag_info["mlagInfos"]
        if self.mlag_system_id or self.mlag_priority_id:
            if self.eth_trunk_id:
                self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info()
                if self.mlag_trunk_attribute_info:
                    if self.mlag_system_id:
                        self.existing["lacpMlagSysId"] = self.mlag_trunk_attribute_info[
                            "lacpMlagSysId"]
                    if self.mlag_priority_id:
                        self.existing["lacpMlagPriority"] = self.mlag_trunk_attribute_info[
                            "lacpMlagPriority"]
            else:
                if self.mlag_global_info:
                    if self.mlag_system_id:
                        self.existing["lacpMlagSysId"] = self.mlag_global_info[
                            "lacpMlagSysId"]
                    if self.mlag_priority_id:
                        self.existing["lacpMlagPriority"] = self.mlag_global_info[
                            "lacpMlagPriority"]
if self.interface or self.mlag_error_down:
if self.mlag_error_down_info:
self.existing["mlagErrorDownInfos"] = self.mlag_error_down_info[
"mlagErrorDownInfos"]
def get_end_state(self):
"""get end state info"""
if self.eth_trunk_id or self.dfs_group_id or self.mlag_id:
self.mlag_info = self.get_mlag_info()
if not self.mlag_system_id and not self.mlag_priority_id:
if self.mlag_info:
self.end_state["mlagInfos"] = self.mlag_info["mlagInfos"]
if self.mlag_system_id or self.mlag_priority_id:
if self.eth_trunk_id:
self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info()
if self.mlag_trunk_attribute_info:
if self.mlag_system_id:
self.end_state["lacpMlagSysId"] = self.mlag_trunk_attribute_info[
"lacpMlagSysId"]
if self.mlag_priority_id:
self.end_state["lacpMlagPriority"] = self.mlag_trunk_attribute_info[
"lacpMlagPriority"]
else:
self.mlag_global_info = self.get_mlag_global_info()
if self.mlag_global_info:
if self.mlag_system_id:
self.end_state["lacpMlagSysId"] = self.mlag_global_info[
"lacpMlagSysId"]
if self.mlag_priority_id:
self.end_state["lacpMlagPriority"] = self.mlag_global_info[
"lacpMlagPriority"]
if self.interface or self.mlag_error_down:
self.mlag_error_down_info = self.get_mlag_error_down_info()
if self.mlag_error_down_info:
self.end_state["mlagErrorDownInfos"] = self.mlag_error_down_info[
"mlagErrorDownInfos"]
def work(self):
"""worker"""
self.check_params()
self.get_proposed()
self.get_existing()
if self.eth_trunk_id or self.dfs_group_id or self.mlag_id:
self.mlag_info = self.get_mlag_info()
if self.eth_trunk_id and self.dfs_group_id and self.mlag_id:
if self.state == "present":
self.create_mlag()
else:
self.delete_mlag()
else:
if not self.mlag_system_id and not self.mlag_priority_id:
self.module.fail_json(
                        msg='Error: eth_trunk_id, dfs_group_id, mlag_id must be configured at the same time.')
if self.mlag_system_id or self.mlag_priority_id:
if self.eth_trunk_id:
self.mlag_trunk_attribute_info = self.get_mlag_trunk_attribute_info()
if self.mlag_system_id or self.mlag_priority_id:
if self.state == "present":
self.set_mlag_interface()
else:
self.delete_mlag_interface()
else:
self.mlag_global_info = self.get_mlag_global_info()
if self.mlag_system_id or self.mlag_priority_id:
if self.state == "present":
self.set_mlag_global()
else:
self.delete_mlag_global()
if self.interface or self.mlag_error_down:
self.mlag_error_down_info = self.get_mlag_error_down_info()
if self.interface and self.mlag_error_down:
if self.mlag_error_down == "enable":
self.create_mlag_error_down()
else:
self.delete_mlag_error_down()
else:
self.module.fail_json(
                    msg='Error: interface, mlag_error_down must be configured at the same time.')
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
eth_trunk_id=dict(type='str'),
dfs_group_id=dict(type='str'),
mlag_id=dict(type='str'),
mlag_system_id=dict(type='str'),
mlag_priority_id=dict(type='str'),
interface=dict(type='str'),
mlag_error_down=dict(type='str', choices=['enable', 'disable']),
state=dict(type='str', default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = MlagInterface(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
|
gpl-3.0
| 2,770,747,173,053,693,000 | 5,616,976,440,144,700,000 | 33.918227 | 115 | 0.557954 | false |
sid88in/incubator-airflow
|
airflow/contrib/operators/winrm_operator.py
|
13
|
5811
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from base64 import b64encode
import logging
from winrm.exceptions import WinRMOperationTimeoutError
from airflow import configuration
from airflow.contrib.hooks.winrm_hook import WinRMHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
# Hide the following error message in urllib3 when making WinRM connections:
# requests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(),
# MultipartInvariantViolationDefect()], unparsed data: ''
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.CRITICAL)
class WinRMOperator(BaseOperator):
"""
WinRMOperator to execute commands on given remote host using the winrm_hook.
    :param winrm_hook: predefined winrm_hook to use for remote execution
:type winrm_hook: :class:`WinRMHook`
:param ssh_conn_id: connection id from airflow Connections
:type ssh_conn_id: str
:param remote_host: remote host to connect
:type remote_host: str
:param command: command to execute on remote host. (templated)
:type command: str
:param timeout: timeout for executing the command.
:type timeout: int
    :param do_xcom_push: return the stdout, which also gets set in XCom by the Airflow platform
:type do_xcom_push: bool
"""
template_fields = ('command',)
@apply_defaults
def __init__(self,
winrm_hook=None,
ssh_conn_id=None,
remote_host=None,
command=None,
timeout=10,
do_xcom_push=False,
*args,
**kwargs):
super(WinRMOperator, self).__init__(*args, **kwargs)
self.winrm_hook = winrm_hook
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.command = command
self.timeout = timeout
self.do_xcom_push = do_xcom_push
def execute(self, context):
if self.ssh_conn_id and not self.winrm_hook:
self.log.info("Hook not found, creating...")
self.winrm_hook = WinRMHook(ssh_conn_id=self.ssh_conn_id)
if not self.winrm_hook:
raise AirflowException("Cannot operate without winrm_hook or ssh_conn_id.")
if self.remote_host is not None:
self.winrm_hook.remote_host = self.remote_host
if not self.command:
raise AirflowException("No command specified so nothing to execute here.")
winrm_client = self.winrm_hook.get_conn()
try:
self.log.info("Running command: '{command}'...".format(command=self.command))
command_id = self.winrm_hook.winrm_protocol.run_command(
winrm_client,
self.command
)
# See: https://github.com/diyan/pywinrm/blob/master/winrm/protocol.py
stdout_buffer = []
stderr_buffer = []
command_done = False
while not command_done:
try:
stdout, stderr, return_code, command_done = \
self.winrm_hook.winrm_protocol._raw_get_command_output(
winrm_client,
command_id
)
# Only buffer stdout if we need to so that we minimize memory usage.
if self.do_xcom_push:
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
for line in stdout.decode('utf-8').splitlines():
self.log.info(line)
for line in stderr.decode('utf-8').splitlines():
self.log.warning(line)
                except WinRMOperationTimeoutError:
# this is an expected error when waiting for a
# long-running process, just silently retry
pass
self.winrm_hook.winrm_protocol.cleanup_command(winrm_client, command_id)
self.winrm_hook.winrm_protocol.close_shell(winrm_client)
except Exception as e:
raise AirflowException("WinRM operator error: {0}".format(str(e)))
        if return_code == 0:
# returning output if do_xcom_push is set
if self.do_xcom_push:
enable_pickling = configuration.conf.getboolean(
'core', 'enable_xcom_pickling'
)
if enable_pickling:
return stdout_buffer
else:
return b64encode(b''.join(stdout_buffer)).decode('utf-8')
else:
error_msg = "Error running cmd: {0}, return code: {1}, error: {2}".format(
self.command,
return_code,
b''.join(stderr_buffer).decode('utf-8')
)
raise AirflowException(error_msg)
self.log.info("Finished!")
return True
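# Usage sketch (illustrative; the task id, command and connection id below
# are assumptions, not part of this file): inside a DAG definition the
# operator is typically wired up as
#
#   run_ipconfig = WinRMOperator(task_id='wintask',
#                                command='ipconfig /all',
#                                ssh_conn_id='ssh_windows_box',
#                                do_xcom_push=True,
#                                dag=dag)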
|
apache-2.0
| 7,607,829,086,429,986,000 | 4,287,286,769,898,313,000 | 38 | 90 | 0.609706 | false |
italomaia/turtle-linux
|
games/Dynamite/pgu/test.py
|
1
|
1624
|
import pygame
from pygame.locals import *
import gui
screen = pygame.display.set_mode(
    (640, 480), FULLSCREEN)  # try adding DOUBLEBUF | HWSURFACE
# pygame.mouse.set_visible(0)
app = gui.App()
c = gui.Container(width=640,height=480)
##
## dialog 1
##
t1 = gui.Table()
t1.tr()
t1.add(gui.Label("Gal Test"))
t2 = gui.Table()
t2.tr()
t2.add(gui.Label("Gui Widgets"))
t2.add(gui.Input())
t2.tr()
t2.add(gui.Label("Button"))
t2.add(gui.Button("Click Me!"))
d1 = gui.Dialog(t1, t2)
c.add(d1, 50, 150)
##
## dialog 2
##
t3 = gui.Table()
t3.tr()
t3.add(gui.Label("Another one"))
t4 = gui.Table()
t4.tr()
t4.add(gui.Label("Name"))
t4.add(gui.Input())
t4.tr()
t4.add(gui.Label("Ohh"))
b1 = gui.Button("OK")
t4.add(b1)
d2 = gui.Dialog(t3, t4)
c.add(d2, 50, 300)
##
## some labels
##
l1 = gui.Label("Suppose this is a menu", color=(255, 255, 255) )
c.add(l1, 50, 50)
l2 = gui.Label("Click <SPACE> to hide top dialog", color=(255, 255,
255) )
c.add(l2, 50, 75)
l3 = gui.Label("Opps... Did it happen?", color=(255, 255, 255) )
##
## app begins
##
app.init(widget=c,screen=screen)
FRAME_EVT = USEREVENT + 1
pygame.event.Event(FRAME_EVT)
pygame.time.set_timer(FRAME_EVT, 30)
_quit = 0
while _quit == 0:
event = pygame.event.wait()
if event.type == FRAME_EVT:
pygame.display.flip()
continue
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
_quit = 1
continue
elif event.key == K_SPACE:
d1.close()
c.add(l3, 100, 100)
app._event(event)
screen.fill((0,0,0))
app.paint(screen)
|
gpl-3.0
| -3,771,427,222,266,549,000 | -3,261,690,242,088,964,600 | 14.037037 | 68 | 0.589286 | false |
Sixshaman/networkx
|
networkx/utils/random_sequence.py
|
10
|
6411
|
"""
Utilities for generating random numbers, random sequences, and
random selections.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import random
import sys
import networkx as nx
__author__ = '\n'.join(['Aric Hagberg ([email protected])',
'Dan Schult([email protected])',
'Ben Edwards([email protected])'])
import warnings as _warnings
def create_degree_sequence(n, sfunction=None, max_tries=50, **kwds):
    """ Attempt to create a valid degree sequence of length n using
specified function sfunction(n,**kwds).
Parameters
----------
n : int
Length of degree sequence = number of nodes
sfunction: function
Function which returns a list of n real or integer values.
Called as "sfunction(n,**kwds)".
max_tries: int
Max number of attempts at creating valid degree sequence.
Notes
-----
Repeatedly create a degree sequence by calling sfunction(n,**kwds)
until achieving a valid degree sequence. If unsuccessful after
max_tries attempts, raise an exception.
For examples of sfunctions that return sequences of random numbers,
see networkx.Utils.
Examples
--------
>>> from networkx.utils import uniform_sequence, create_degree_sequence
>>> seq=create_degree_sequence(10,uniform_sequence)
"""
    _warnings.warn("create_degree_sequence() is deprecated",
                   DeprecationWarning)
    tries = 0
    max_deg = n
while tries < max_tries:
trialseq=sfunction(n,**kwds)
# round to integer values in the range [0,max_deg]
seq=[min(max_deg, max( int(round(s)),0 )) for s in trialseq]
# if graphical return, else throw away and try again
if nx.is_valid_degree_sequence(seq):
return seq
tries+=1
    raise nx.NetworkXError(
        "Exceeded max (%d) attempts at a valid sequence." % max_tries)
# The same helpers for choosing random sequences from distributions
# uses Python's random module
# http://www.python.org/doc/current/lib/module-random.html
def pareto_sequence(n,exponent=1.0):
"""
Return sample sequence of length n from a Pareto distribution.
"""
return [random.paretovariate(exponent) for i in range(n)]
def powerlaw_sequence(n,exponent=2.0):
"""
Return sample sequence of length n from a power law distribution.
"""
return [random.paretovariate(exponent-1) for i in range(n)]
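# Added sketch (not in the original file): Pareto variates are always >= 1,
# so both sequence helpers above yield values bounded below by 1.
def _demo_powerlaw_sequence():
    random.seed(1)
    assert all(s >= 1.0 for s in powerlaw_sequence(10, exponent=2.5))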
def zipf_rv(alpha, xmin=1, seed=None):
r"""Return a random value chosen from the Zipf distribution.
The return value is an integer drawn from the probability distribution
    .. math::
p(x)=\frac{x^{-\alpha}}{\zeta(\alpha,x_{min})},
where `\zeta(\alpha,x_{min})` is the Hurwitz zeta function.
Parameters
----------
alpha : float
Exponent value of the distribution
xmin : int
Minimum value
seed : int
Seed value for random number generator
Returns
-------
x : int
Random value from Zipf distribution
Raises
------
ValueError:
If xmin < 1 or
If alpha <= 1
Notes
-----
    The rejection algorithm generates random values for the power-law
distribution in uniformly bounded expected time dependent on
parameters. See [1] for details on its operation.
Examples
--------
>>> nx.zipf_rv(alpha=2, xmin=3, seed=42) # doctest: +SKIP
References
----------
    .. [1] Luc Devroye, Non-Uniform Random Variate Generation,
Springer-Verlag, New York, 1986.
"""
if xmin < 1:
raise ValueError("xmin < 1")
if alpha <= 1:
raise ValueError("a <= 1.0")
    if seed is not None:
random.seed(seed)
a1 = alpha - 1.0
b = 2**a1
while True:
u = 1.0 - random.random() # u in (0,1]
v = random.random() # v in [0,1)
x = int(xmin*u**-(1.0/a1))
t = (1.0+(1.0/x))**a1
if v*x*(t-1.0)/(b-1.0) <= t/b:
break
return x
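# Added sketch (not in the original file): a quick sanity check that the
# rejection sampler above honors ``xmin``.
def _demo_zipf_rv():
    samples = [zipf_rv(2.5, xmin=2) for _ in range(50)]
    assert min(samples) >= 2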
def zipf_sequence(n, alpha=2.0, xmin=1):
"""Return a sample sequence of length n from a Zipf distribution with
exponent parameter alpha and minimum value xmin.
See Also
--------
zipf_rv
"""
return [ zipf_rv(alpha,xmin) for _ in range(n)]
def uniform_sequence(n):
"""
Return sample sequence of length n from a uniform distribution.
"""
return [ random.uniform(0,n) for i in range(n)]
def cumulative_distribution(distribution):
"""Return normalized cumulative distribution from discrete distribution."""
cdf= [0.0]
psum=float(sum(distribution))
for i in range(0,len(distribution)):
cdf.append(cdf[i]+distribution[i]/psum)
return cdf
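# Added sketch (not in the original file): the CDF starts at 0.0 and is
# normalized, e.g. the histogram [1, 1, 2] maps to [0.0, 0.25, 0.5, 1.0].
def _demo_cumulative_distribution():
    assert cumulative_distribution([1, 1, 2]) == [0.0, 0.25, 0.5, 1.0]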
def discrete_sequence(n, distribution=None, cdistribution=None):
"""
Return sample sequence of length n from a given discrete distribution
or discrete cumulative distribution.
One of the following must be specified.
distribution = histogram of values, will be normalized
cdistribution = normalized discrete cumulative distribution
"""
import bisect
if cdistribution is not None:
cdf=cdistribution
elif distribution is not None:
cdf=cumulative_distribution(distribution)
else:
raise nx.NetworkXError(
"discrete_sequence: distribution or cdistribution missing")
# get a uniform random number
inputseq=[random.random() for i in range(n)]
# choose from CDF
seq=[bisect.bisect_left(cdf,s)-1 for s in inputseq]
return seq
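# Added sketch (not in the original file): indices with zero weight are
# (almost surely) never drawn.
def _demo_discrete_sequence():
    random.seed(7)
    seq = discrete_sequence(100, distribution=[0, 1, 3])
    assert set(seq) <= set([1, 2])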
def random_weighted_sample(mapping, k):
"""Return k items without replacement from a weighted sample.
The input is a dictionary of items with weights as values.
"""
if k > len(mapping):
raise ValueError("sample larger than population")
sample = set()
while len(sample) < k:
sample.add(weighted_choice(mapping))
return list(sample)
def weighted_choice(mapping):
"""Return a single element from a weighted sample.
The input is a dictionary of items with weights as values.
"""
# use roulette method
rnd = random.random() * sum(mapping.values())
for k, w in mapping.items():
rnd -= w
if rnd < 0:
return k
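# Added sketch (not in the original file): heavier weights win more often.
def _demo_weighted_choice():
    random.seed(42)
    counts = {'a': 0, 'b': 0}
    for _ in range(1000):
        counts[weighted_choice({'a': 3.0, 'b': 1.0})] += 1
    assert counts['a'] > counts['b']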
|
bsd-3-clause
| 6,471,734,493,646,453,000 | 8,109,075,758,710,186,000 | 27.620536 | 79 | 0.631883 | false |
ezbake/ezbake-common-python
|
thrift/thrift-utils/lib/ezbake/thrift/utils/ezthrifttest.py
|
1
|
3296
|
# Copyright (C) 2013-2014 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EzThriftTest contains classes that will be useful for testing thrift services
"""
from kazoo.testing import KazooTestCase
from ezbake.discovery import ServiceDiscoveryClient
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from thrift.transport import TSocket, TTransport
from thrift.transport.TTransport import TTransportException
from ..transport.EzSSLSocket import TSSLServerSocket
from multiprocessing.process import Process
import time
import logging
logger = logging.getLogger(__name__)
class EzThriftServerTestHarness(KazooTestCase):
"""The EzThriftServerTestHarness extends KazooTestCase to provide service discovery for clients in tests
The thrift server is started using a TSimpleServer and registered with EzBake service discovery
"""
def setUp(self):
super(EzThriftServerTestHarness, self).setUp()
self.sd_client = ServiceDiscoveryClient(self.hosts)
self.server_processes = []
@staticmethod
def __thrift_server(processor, host="localhost", port=8449, use_simple_server=True,
use_ssl=False, ca_certs=None, cert=None, key=None):
if use_ssl:
transport = TSSLServerSocket(host=host, port=port,
ca_certs=ca_certs, cert=cert, key=key)
else:
transport = TSocket.TServerSocket(host=host, port=port)
t_factory = TTransport.TBufferedTransportFactory()
p_factory = TBinaryProtocol.TBinaryProtocolFactory()
if use_simple_server:
server = TServer.TSimpleServer(processor, transport, t_factory, p_factory)
else:
server = TServer.TThreadedServer(processor, transport, t_factory, p_factory)
try:
server.serve()
        except Exception as e:
            logger.error("Server error: %s", e)
def add_server(self, app_name, service_name, host, port, processor, use_simple_server=True, wait=1,
use_ssl=False, ca_certs=None, cert=None, key=None):
self.sd_client.register_endpoint(app_name, service_name, host, port)
server_process = Process(target=self.__thrift_server,
args=(processor, host, port, use_simple_server, use_ssl, ca_certs, cert, key))
server_process.start()
time.sleep(wait)
self.server_processes.append(server_process)
def tearDown(self):
super(EzThriftServerTestHarness, self).tearDown()
for server_process in self.server_processes:
if server_process.is_alive():
server_process.terminate()
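# Usage sketch (illustrative; the app/service names and handler below are
# assumptions, not part of this file): a concrete test case subclasses the
# harness and registers a processor before exercising a thrift client:
#
#   class MyServiceTest(EzThriftServerTestHarness):
#       def setUp(self):
#           super(MyServiceTest, self).setUp()
#           self.add_server('myapp', 'mysvc', 'localhost', 8449,
#                           MyService.Processor(MyHandler()))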
|
apache-2.0
| -7,552,733,811,548,549,000 | 585,431,081,118,243,100 | 40.2125 | 111 | 0.68932 | false |
softliumin/redis-py
|
redis/client.py
|
22
|
100985
|
from __future__ import with_statement
from itertools import chain
import datetime
import sys
import warnings
import time
import threading
import time as mod_time
from redis._compat import (b, basestring, bytes, imap, iteritems, iterkeys,
itervalues, izip, long, nativestr, unicode,
safe_unicode)
from redis.connection import (ConnectionPool, UnixDomainSocketConnection,
SSLConnection, Token)
from redis.lock import Lock, LuaLock
from redis.exceptions import (
ConnectionError,
DataError,
ExecAbortError,
NoScriptError,
PubSubError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
SYM_EMPTY = b('')
def list_or_args(keys, args):
# returns a single list combining keys and args
try:
iter(keys)
# a string or bytes instance can be iterated, but indicates
# keys wasn't passed as a list
if isinstance(keys, (basestring, bytes)):
keys = [keys]
except TypeError:
keys = [keys]
if args:
keys.extend(args)
return keys
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def string_keys_to_dict(key_string, callback):
return dict.fromkeys(key_string.split(), callback)
def dict_merge(*dicts):
merged = {}
[merged.update(d) for d in dicts]
return merged
def parse_debug_object(response):
"Parse the results of Redis's DEBUG OBJECT command into a Python dict"
# The 'type' of the object is the first item in the response, but isn't
# prefixed with a name
response = nativestr(response)
response = 'type:' + response
response = dict([kv.split(':') for kv in response.split()])
# parse some expected int values from the string response
# note: this cmd isn't spec'd so these may not appear in all redis versions
int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
for field in int_fields:
if field in response:
response[field] = int(response[field])
return response
def parse_object(response, infotype):
"Parse the results of an OBJECT command"
if infotype in ('idletime', 'refcount'):
return int_or_none(response)
return response
def parse_info(response):
"Parse the result of Redis's INFO command into a Python dict"
info = {}
response = nativestr(response)
def get_value(value):
if ',' not in value or '=' not in value:
try:
if '.' in value:
return float(value)
else:
return int(value)
except ValueError:
return value
else:
sub_dict = {}
for item in value.split(','):
k, v = item.rsplit('=', 1)
sub_dict[k] = get_value(v)
return sub_dict
for line in response.splitlines():
if line and not line.startswith('#'):
if line.find(':') != -1:
key, value = line.split(':', 1)
info[key] = get_value(value)
else:
# if the line isn't splittable, append it to the "__raw__" key
info.setdefault('__raw__', []).append(line)
return info
SENTINEL_STATE_TYPES = {
'can-failover-its-master': int,
'config-epoch': int,
'down-after-milliseconds': int,
'failover-timeout': int,
'info-refresh': int,
'last-hello-message': int,
'last-ok-ping-reply': int,
'last-ping-reply': int,
'last-ping-sent': int,
'master-link-down-time': int,
'master-port': int,
'num-other-sentinels': int,
'num-slaves': int,
'o-down-time': int,
'pending-commands': int,
'parallel-syncs': int,
'port': int,
'quorum': int,
'role-reported-time': int,
's-down-time': int,
'slave-priority': int,
'slave-repl-offset': int,
'voted-leader-epoch': int
}
def parse_sentinel_state(item):
result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES)
flags = set(result['flags'].split(','))
for name, flag in (('is_master', 'master'), ('is_slave', 'slave'),
('is_sdown', 's_down'), ('is_odown', 'o_down'),
('is_sentinel', 'sentinel'),
('is_disconnected', 'disconnected'),
('is_master_down', 'master_down')):
result[name] = flag in flags
return result
def parse_sentinel_master(response):
return parse_sentinel_state(imap(nativestr, response))
def parse_sentinel_masters(response):
result = {}
for item in response:
state = parse_sentinel_state(imap(nativestr, item))
result[state['name']] = state
return result
def parse_sentinel_slaves_and_sentinels(response):
return [parse_sentinel_state(imap(nativestr, item)) for item in response]
def parse_sentinel_get_master(response):
return response and (response[0], int(response[1])) or None
def pairs_to_dict(response):
"Create a dict given a list of key/value pairs"
it = iter(response)
return dict(izip(it, it))
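# Added sketch (not in the original file): flat key/value replies (e.g. the
# raw HGETALL response) fold into a dict.
def _demo_pairs_to_dict():
    assert pairs_to_dict(['k1', 'v1', 'k2', 'v2']) == {'k1': 'v1', 'k2': 'v2'}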
def pairs_to_dict_typed(response, type_info):
it = iter(response)
result = {}
for key, value in izip(it, it):
if key in type_info:
try:
value = type_info[key](value)
            except Exception:
# if for some reason the value can't be coerced, just use
# the string value
pass
result[key] = value
return result
def zset_score_pairs(response, **options):
"""
If ``withscores`` is specified in the options, return the response as
a list of (value, score) pairs
"""
if not response or not options['withscores']:
return response
score_cast_func = options.get('score_cast_func', float)
it = iter(response)
return list(izip(it, imap(score_cast_func, it)))
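# Added sketch (not in the original file): with ``withscores`` the flat reply
# becomes (member, score) pairs, with scores cast by ``score_cast_func``.
def _demo_zset_score_pairs():
    reply = ['a', '1.5', 'b', '2']
    assert zset_score_pairs(reply, withscores=True) == [('a', 1.5), ('b', 2.0)]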
def sort_return_tuples(response, **options):
"""
If ``groups`` is specified, return the response as a list of
n-element tuples with n being the value found in options['groups']
"""
if not response or not options['groups']:
return response
n = options['groups']
return list(izip(*[response[i::n] for i in range(n)]))
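# Added sketch (not in the original file): SORT results are regrouped into
# n-tuples when ``groups`` is set.
def _demo_sort_return_tuples():
    reply = ['u1', '10', 'u2', '20']
    assert sort_return_tuples(reply, groups=2) == [('u1', '10'), ('u2', '20')]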
def int_or_none(response):
if response is None:
return None
return int(response)
def float_or_none(response):
if response is None:
return None
return float(response)
def bool_ok(response):
return nativestr(response) == 'OK'
def parse_client_list(response, **options):
clients = []
for c in nativestr(response).splitlines():
clients.append(dict([pair.split('=') for pair in c.split(' ')]))
return clients
def parse_config_get(response, **options):
response = [nativestr(i) if i is not None else None for i in response]
return response and pairs_to_dict(response) or {}
def parse_scan(response, **options):
cursor, r = response
return long(cursor), r
def parse_hscan(response, **options):
cursor, r = response
return long(cursor), r and pairs_to_dict(r) or {}
def parse_zscan(response, **options):
score_cast_func = options.get('score_cast_func', float)
cursor, r = response
it = iter(r)
return long(cursor), list(izip(it, imap(score_cast_func, it)))
def parse_slowlog_get(response, **options):
return [{
'id': item[0],
'start_time': int(item[1]),
'duration': int(item[2]),
'command': b(' ').join(item[3])
} for item in response]
class StrictRedis(object):
"""
Implementation of the Redis protocol.
This abstract class provides a Python interface to all Redis commands
and an implementation of the Redis protocol.
Connection and Pipeline derive from this, implementing how
the commands are sent and received to the Redis server
"""
RESPONSE_CALLBACKS = dict_merge(
string_keys_to_dict(
'AUTH EXISTS EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST '
'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX',
bool
),
string_keys_to_dict(
'BITCOUNT BITPOS DECRBY DEL GETBIT HDEL HLEN INCRBY LINSERT LLEN '
'LPUSHX PFADD PFCOUNT RPUSHX SADD SCARD SDIFFSTORE SETBIT '
'SETRANGE SINTERSTORE SREM STRLEN SUNIONSTORE ZADD ZCARD '
'ZLEXCOUNT ZREM ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE',
int
),
string_keys_to_dict('INCRBYFLOAT HINCRBYFLOAT', float),
string_keys_to_dict(
# these return OK, or int if redis-server is >=1.3.4
'LPUSH RPUSH',
lambda r: isinstance(r, long) and r or nativestr(r) == 'OK'
),
string_keys_to_dict('SORT', sort_return_tuples),
string_keys_to_dict('ZSCORE ZINCRBY', float_or_none),
string_keys_to_dict(
'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE RENAME '
'SAVE SELECT SHUTDOWN SLAVEOF WATCH UNWATCH',
bool_ok
),
string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
string_keys_to_dict(
'SDIFF SINTER SMEMBERS SUNION',
lambda r: r and set(r) or set()
),
string_keys_to_dict(
'ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
zset_score_pairs
),
string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
{
'CLIENT GETNAME': lambda r: r and nativestr(r),
'CLIENT KILL': bool_ok,
'CLIENT LIST': parse_client_list,
'CLIENT SETNAME': bool_ok,
'CONFIG GET': parse_config_get,
'CONFIG RESETSTAT': bool_ok,
'CONFIG SET': bool_ok,
'DEBUG OBJECT': parse_debug_object,
'HGETALL': lambda r: r and pairs_to_dict(r) or {},
'HSCAN': parse_hscan,
'INFO': parse_info,
'LASTSAVE': timestamp_to_datetime,
'OBJECT': parse_object,
'PING': lambda r: nativestr(r) == 'PONG',
'RANDOMKEY': lambda r: r and r or None,
'SCAN': parse_scan,
'SCRIPT EXISTS': lambda r: list(imap(bool, r)),
'SCRIPT FLUSH': bool_ok,
'SCRIPT KILL': bool_ok,
'SCRIPT LOAD': nativestr,
'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master,
'SENTINEL MASTER': parse_sentinel_master,
'SENTINEL MASTERS': parse_sentinel_masters,
'SENTINEL MONITOR': bool_ok,
'SENTINEL REMOVE': bool_ok,
'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels,
'SENTINEL SET': bool_ok,
'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels,
'SET': lambda r: r and nativestr(r) == 'OK',
'SLOWLOG GET': parse_slowlog_get,
'SLOWLOG LEN': int,
'SLOWLOG RESET': bool_ok,
'SSCAN': parse_scan,
'TIME': lambda x: (int(x[0]), int(x[1])),
'ZSCAN': parse_zscan
}
)
@classmethod
def from_url(cls, url, db=None, **kwargs):
"""
Return a Redis client object configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
"""
connection_pool = ConnectionPool.from_url(url, db=db, **kwargs)
return cls(connection_pool=connection_pool)
def __init__(self, host='localhost', port=6379,
db=0, password=None, socket_timeout=None,
socket_connect_timeout=None,
socket_keepalive=None, socket_keepalive_options=None,
connection_pool=None, unix_socket_path=None,
encoding='utf-8', encoding_errors='strict',
charset=None, errors=None,
decode_responses=False, retry_on_timeout=False,
ssl=False, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs=None, ssl_ca_certs=None):
if not connection_pool:
if charset is not None:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
encoding = charset
if errors is not None:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
encoding_errors = errors
kwargs = {
'db': db,
'password': password,
'socket_timeout': socket_timeout,
'encoding': encoding,
'encoding_errors': encoding_errors,
'decode_responses': decode_responses,
'retry_on_timeout': retry_on_timeout
}
# based on input, setup appropriate connection args
if unix_socket_path is not None:
kwargs.update({
'path': unix_socket_path,
'connection_class': UnixDomainSocketConnection
})
else:
# TCP specific options
kwargs.update({
'host': host,
'port': port,
'socket_connect_timeout': socket_connect_timeout,
'socket_keepalive': socket_keepalive,
'socket_keepalive_options': socket_keepalive_options,
})
if ssl:
kwargs.update({
'connection_class': SSLConnection,
'ssl_keyfile': ssl_keyfile,
'ssl_certfile': ssl_certfile,
'ssl_cert_reqs': ssl_cert_reqs,
'ssl_ca_certs': ssl_ca_certs,
})
connection_pool = ConnectionPool(**kwargs)
self.connection_pool = connection_pool
self._use_lua_lock = None
self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
def __repr__(self):
return "%s<%s>" % (type(self).__name__, repr(self.connection_pool))
def set_response_callback(self, command, callback):
"Set a custom Response Callback"
self.response_callbacks[command] = callback
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return StrictPipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def transaction(self, func, *watches, **kwargs):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop('shard_hint', None)
value_from_callable = kwargs.pop('value_from_callable', False)
watch_delay = kwargs.pop('watch_delay', None)
with self.pipeline(True, shard_hint) as pipe:
while 1:
try:
if watches:
pipe.watch(*watches)
func_value = func(pipe)
exec_value = pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
time.sleep(watch_delay)
continue
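    # Usage sketch (added note; names below are assumptions): transaction()
    # retries the callable on WatchError, e.g.
    #
    #   def multiply(pipe):
    #       value = int(pipe.get('key') or 0)
    #       pipe.multi()
    #       pipe.set('key', value * 2)
    #   r.transaction(multiply, 'key')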
def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
lock_class=None, thread_local=True):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage. """
if lock_class is None:
if self._use_lua_lock is None:
# the first time .lock() is called, determine if we can use
# Lua by attempting to register the necessary scripts
try:
LuaLock.register_scripts(self)
self._use_lua_lock = True
except ResponseError:
self._use_lua_lock = False
lock_class = self._use_lua_lock and LuaLock or Lock
return lock_class(self, name, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout,
thread_local=thread_local)
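    # Usage sketch (added note): Lock objects double as context managers, so
    # a typical critical section looks like
    #
    #   with r.lock('my-key', timeout=5, blocking_timeout=3):
    #       ...  # exclusive work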
def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
return PubSub(self.connection_pool, **kwargs)
# COMMAND EXECUTION AND PROTOCOL PARSING
def execute_command(self, *args, **options):
"Execute a command and return a parsed response"
pool = self.connection_pool
command_name = args[0]
connection = pool.get_connection(command_name, **options)
try:
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not connection.retry_on_timeout and isinstance(e, TimeoutError):
raise
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
finally:
pool.release(connection)
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
response = connection.read_response()
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response
# SERVER INFORMATION
def bgrewriteaof(self):
"Tell the Redis server to rewrite the AOF file from data in memory."
return self.execute_command('BGREWRITEAOF')
def bgsave(self):
"""
Tell the Redis server to save its data to disk. Unlike save(),
this method is asynchronous and returns immediately.
"""
return self.execute_command('BGSAVE')
def client_kill(self, address):
"Disconnects the client at ``address`` (ip:port)"
return self.execute_command('CLIENT KILL', address)
def client_list(self):
"Returns a list of currently connected clients"
return self.execute_command('CLIENT LIST')
def client_getname(self):
"Returns the current connection name"
return self.execute_command('CLIENT GETNAME')
def client_setname(self, name):
"Sets the current connection name"
return self.execute_command('CLIENT SETNAME', name)
def config_get(self, pattern="*"):
"Return a dictionary of configuration based on the ``pattern``"
return self.execute_command('CONFIG GET', pattern)
def config_set(self, name, value):
"Set config item ``name`` with ``value``"
return self.execute_command('CONFIG SET', name, value)
def config_resetstat(self):
"Reset runtime statistics"
return self.execute_command('CONFIG RESETSTAT')
def config_rewrite(self):
"Rewrite config file with the minimal change to reflect running config"
return self.execute_command('CONFIG REWRITE')
def dbsize(self):
"Returns the number of keys in the current database"
return self.execute_command('DBSIZE')
def debug_object(self, key):
"Returns version specific meta information about a given key"
return self.execute_command('DEBUG OBJECT', key)
def echo(self, value):
"Echo the string back from the server"
return self.execute_command('ECHO', value)
def flushall(self):
"Delete all keys in all databases on the current host"
return self.execute_command('FLUSHALL')
def flushdb(self):
"Delete all keys in the current database"
return self.execute_command('FLUSHDB')
def info(self, section=None):
"""
Returns a dictionary containing information about the Redis server
The ``section`` option can be used to select a specific section
of information
The section option is not supported by older versions of Redis Server,
and will generate ResponseError
"""
if section is None:
return self.execute_command('INFO')
else:
return self.execute_command('INFO', section)
def lastsave(self):
"""
Return a Python datetime object representing the last time the
Redis database was saved to disk
"""
return self.execute_command('LASTSAVE')
def object(self, infotype, key):
"Return the encoding, idletime, or refcount about the key"
return self.execute_command('OBJECT', infotype, key, infotype=infotype)
def ping(self):
"Ping the Redis server"
return self.execute_command('PING')
def save(self):
"""
Tell the Redis server to save its data to disk,
blocking until the save is complete
"""
return self.execute_command('SAVE')
def sentinel(self, *args):
"Redis Sentinel's SENTINEL command."
warnings.warn(
DeprecationWarning('Use the individual sentinel_* methods'))
def sentinel_get_master_addr_by_name(self, service_name):
"Returns a (host, port) pair for the given ``service_name``"
return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME',
service_name)
def sentinel_master(self, service_name):
"Returns a dictionary containing the specified masters state."
return self.execute_command('SENTINEL MASTER', service_name)
def sentinel_masters(self):
"Returns a list of dictionaries containing each master's state."
return self.execute_command('SENTINEL MASTERS')
def sentinel_monitor(self, name, ip, port, quorum):
"Add a new master to Sentinel to be monitored"
return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum)
def sentinel_remove(self, name):
"Remove a master from Sentinel's monitoring"
return self.execute_command('SENTINEL REMOVE', name)
def sentinel_sentinels(self, service_name):
"Returns a list of sentinels for ``service_name``"
return self.execute_command('SENTINEL SENTINELS', service_name)
def sentinel_set(self, name, option, value):
"Set Sentinel monitoring parameters for a given master"
return self.execute_command('SENTINEL SET', name, option, value)
def sentinel_slaves(self, service_name):
"Returns a list of slaves for ``service_name``"
return self.execute_command('SENTINEL SLAVES', service_name)
def shutdown(self):
"Shutdown the server"
try:
self.execute_command('SHUTDOWN')
except ConnectionError:
# a ConnectionError here is expected
return
raise RedisError("SHUTDOWN seems to have failed.")
def slaveof(self, host=None, port=None):
"""
Set the server to be a replicated slave of the instance identified
by the ``host`` and ``port``. If called without arguments, the
instance is promoted to a master instead.
"""
if host is None and port is None:
return self.execute_command('SLAVEOF', Token('NO'), Token('ONE'))
return self.execute_command('SLAVEOF', host, port)
def slowlog_get(self, num=None):
"""
Get the entries from the slowlog. If ``num`` is specified, get the
most recent ``num`` items.
"""
args = ['SLOWLOG GET']
if num is not None:
args.append(num)
return self.execute_command(*args)
def slowlog_len(self):
"Get the number of items in the slowlog"
return self.execute_command('SLOWLOG LEN')
def slowlog_reset(self):
"Remove all items in the slowlog"
return self.execute_command('SLOWLOG RESET')
def time(self):
"""
Returns the server time as a 2-item tuple of ints:
(seconds since epoch, microseconds into this second).
"""
return self.execute_command('TIME')
def wait(self, num_replicas, timeout):
"""
Redis synchronous replication
That returns the number of replicas that processed the query when
we finally have at least ``num_replicas``, or when the ``timeout`` was
reached.
"""
return self.execute_command('WAIT', num_replicas, timeout)
# BASIC KEY COMMANDS
def append(self, key, value):
"""
Appends the string ``value`` to the value at ``key``. If ``key``
doesn't already exist, create it with a value of ``value``.
Returns the new length of the value at ``key``.
"""
return self.execute_command('APPEND', key, value)
def bitcount(self, key, start=None, end=None):
"""
Returns the count of set bits in the value of ``key``. Optional
        ``start`` and ``end`` parameters indicate which bytes to consider
"""
params = [key]
if start is not None and end is not None:
params.append(start)
params.append(end)
elif (start is not None and end is None) or \
(end is not None and start is None):
raise RedisError("Both start and end must be specified")
return self.execute_command('BITCOUNT', *params)
def bitop(self, operation, dest, *keys):
"""
Perform a bitwise operation using ``operation`` between ``keys`` and
store the result in ``dest``.
"""
return self.execute_command('BITOP', operation, dest, *keys)
def bitpos(self, key, bit, start=None, end=None):
"""
Return the position of the first bit set to 1 or 0 in a string.
``start`` and ``end`` define the search range. The range is interpreted
as a range of bytes and not a range of bits, so start=0 and end=2
means to look at the first three bytes.
"""
if bit not in (0, 1):
raise RedisError('bit must be 0 or 1')
params = [key, bit]
if start is not None:
    params.append(start)
if start is not None and end is not None:
params.append(end)
elif start is None and end is not None:
raise RedisError("start argument is not set, "
"when end is specified")
return self.execute_command('BITPOS', *params)
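# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r = StrictRedis()``): setting bit 7 of an empty key
# makes its first byte 0x01, so BITCOUNT sees one set bit and BITPOS
# finds it at offset 7.
#   r.setbit('flags', 7, 1)   # returns the previous bit value
#   r.bitcount('flags')       # -> 1
#   r.bitpos('flags', 1)      # -> 7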
def decr(self, name, amount=1):
"""
Decrements the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as 0 - ``amount``
"""
return self.execute_command('DECRBY', name, amount)
def delete(self, *names):
"Delete one or more keys specified by ``names``"
return self.execute_command('DEL', *names)
def __delitem__(self, name):
self.delete(name)
def dump(self, name):
"""
Return a serialized version of the value stored at the specified key.
If key does not exist a nil bulk reply is returned.
"""
return self.execute_command('DUMP', name)
def exists(self, name):
"Returns a boolean indicating whether key ``name`` exists"
return self.execute_command('EXISTS', name)
__contains__ = exists
def expire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` seconds. ``time``
can be represented by an integer or a Python timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return self.execute_command('EXPIRE', name, time)
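# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r``): both calls below are equivalent thanks to the
# timedelta conversion above.
#   r.expire('session:42', 1800)
#   r.expire('session:42', datetime.timedelta(minutes=30))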
def expireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer indicating unix time or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
when = int(mod_time.mktime(when.timetuple()))
return self.execute_command('EXPIREAT', name, when)
def get(self, name):
"""
Return the value at key ``name``, or None if the key doesn't exist
"""
return self.execute_command('GET', name)
def __getitem__(self, name):
"""
Return the value at key ``name``, raises a KeyError if the key
doesn't exist.
"""
value = self.get(name)
if value is not None:
return value
raise KeyError(name)
def getbit(self, name, offset):
"Returns a boolean indicating the value of ``offset`` in ``name``"
return self.execute_command('GETBIT', name, offset)
def getrange(self, key, start, end):
"""
Returns the substring of the string value stored at ``key``,
determined by the offsets ``start`` and ``end`` (both are inclusive)
"""
return self.execute_command('GETRANGE', key, start, end)
def getset(self, name, value):
"""
Sets the value at key ``name`` to ``value``
and returns the old value at key ``name`` atomically.
"""
return self.execute_command('GETSET', name, value)
def incr(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
return self.execute_command('INCRBY', name, amount)
def incrby(self, name, amount=1):
"""
Increments the value of ``key`` by ``amount``. If no key exists,
the value will be initialized as ``amount``
"""
# An alias for ``incr()``, which already issues the INCRBY Redis
# command under the hood.
return self.incr(name, amount)
def incrbyfloat(self, name, amount=1.0):
"""
Increments the value at key ``name`` by floating ``amount``.
If no key exists, the value will be initialized as ``amount``
"""
return self.execute_command('INCRBYFLOAT', name, amount)
def keys(self, pattern='*'):
"Returns a list of keys matching ``pattern``"
return self.execute_command('KEYS', pattern)
def mget(self, keys, *args):
"""
Returns a list of values ordered identically to ``keys``
"""
args = list_or_args(keys, args)
return self.execute_command('MGET', *args)
def mset(self, *args, **kwargs):
"""
Sets key/values based on a mapping. Mapping can be supplied as a single
dictionary argument or as kwargs.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSET requires **kwargs or a single dict arg')
kwargs.update(args[0])
items = []
for pair in iteritems(kwargs):
items.extend(pair)
return self.execute_command('MSET', *items)
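# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r``): the mapping can be passed either way.
#   r.mset({'a': '1', 'b': '2'})
#   r.mset(a='1', b='2')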
def msetnx(self, *args, **kwargs):
"""
Sets key/values based on a mapping if none of the keys are already set.
Mapping can be supplied as a single dictionary argument or as kwargs.
Returns a boolean indicating if the operation was successful.
"""
if args:
if len(args) != 1 or not isinstance(args[0], dict):
raise RedisError('MSETNX requires **kwargs or a single '
'dict arg')
kwargs.update(args[0])
items = []
for pair in iteritems(kwargs):
items.extend(pair)
return self.execute_command('MSETNX', *items)
def move(self, name, db):
"Moves the key ``name`` to a different Redis database ``db``"
return self.execute_command('MOVE', name, db)
def persist(self, name):
"Removes an expiration on ``name``"
return self.execute_command('PERSIST', name)
def pexpire(self, name, time):
"""
Set an expire flag on key ``name`` for ``time`` milliseconds.
``time`` can be represented by an integer or a Python timedelta
object.
"""
if isinstance(time, datetime.timedelta):
ms = int(time.microseconds / 1000)
time = (time.seconds + time.days * 24 * 3600) * 1000 + ms
return self.execute_command('PEXPIRE', name, time)
def pexpireat(self, name, when):
"""
Set an expire flag on key ``name``. ``when`` can be represented
as an integer representing unix time in milliseconds (unix time * 1000)
or a Python datetime object.
"""
if isinstance(when, datetime.datetime):
ms = int(when.microsecond / 1000)
when = int(mod_time.mktime(when.timetuple())) * 1000 + ms
return self.execute_command('PEXPIREAT', name, when)
def psetex(self, name, time_ms, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time_ms``
milliseconds. ``time_ms`` can be represented by an integer or a Python
timedelta object
"""
if isinstance(time_ms, datetime.timedelta):
ms = int(time_ms.microseconds / 1000)
time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms
return self.execute_command('PSETEX', name, time_ms, value)
def pttl(self, name):
"Returns the number of milliseconds until the key ``name`` will expire"
return self.execute_command('PTTL', name)
def randomkey(self):
"Returns the name of a random key"
return self.execute_command('RANDOMKEY')
def rename(self, src, dst):
"""
Rename key ``src`` to ``dst``
"""
return self.execute_command('RENAME', src, dst)
def renamenx(self, src, dst):
"Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
return self.execute_command('RENAMENX', src, dst)
def restore(self, name, ttl, value):
"""
Create a key using the provided serialized value, previously obtained
using DUMP.
"""
return self.execute_command('RESTORE', name, ttl, value)
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
"""
Set the value at key ``name`` to ``value``
``ex`` sets an expire flag on key ``name`` for ``ex`` seconds.
``px`` sets an expire flag on key ``name`` for ``px`` milliseconds.
``nx`` if set to True, set the value at key ``name`` to ``value`` if it
does not already exist.
``xx`` if set to True, set the value at key ``name`` to ``value`` if it
already exists.
"""
pieces = [name, value]
if ex:
pieces.append('EX')
if isinstance(ex, datetime.timedelta):
ex = ex.seconds + ex.days * 24 * 3600
pieces.append(ex)
if px:
pieces.append('PX')
if isinstance(px, datetime.timedelta):
ms = int(px.microseconds / 1000)
px = (px.seconds + px.days * 24 * 3600) * 1000 + ms
pieces.append(px)
if nx:
pieces.append('NX')
if xx:
pieces.append('XX')
return self.execute_command('SET', *pieces)
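# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r``): SET with NX and an expiry is a common way to
# take a short-lived lock; the command returns a truthy value only when
# the key was actually set.
#   if r.set('lock:job', 'worker-1', ex=30, nx=True):
#       pass  # lock acquired; do the work, then r.delete('lock:job')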
def __setitem__(self, name, value):
self.set(name, value)
def setbit(self, name, offset, value):
"""
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.
"""
value = value and 1 or 0
return self.execute_command('SETBIT', name, offset, value)
def setex(self, name, time, value):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return self.execute_command('SETEX', name, time, value)
def setnx(self, name, value):
"Set the value of key ``name`` to ``value`` if key doesn't exist"
return self.execute_command('SETNX', name, value)
def setrange(self, name, offset, value):
"""
Overwrite bytes in the value of ``name`` starting at ``offset`` with
``value``. If ``offset`` plus the length of ``value`` exceeds the
length of the original value, the new value will be larger than before.
If ``offset`` exceeds the length of the original value, null bytes
will be used to pad between the end of the previous value and the start
of what's being injected.
Returns the length of the new string.
"""
return self.execute_command('SETRANGE', name, offset, value)
def strlen(self, name):
"Return the number of bytes stored in the value of ``name``"
return self.execute_command('STRLEN', name)
def substr(self, name, start, end=-1):
"""
Return a substring of the string at key ``name``. ``start`` and ``end``
are 0-based integers specifying the portion of the string to return.
"""
return self.execute_command('SUBSTR', name, start, end)
def ttl(self, name):
"Returns the number of seconds until the key ``name`` will expire"
return self.execute_command('TTL', name)
def type(self, name):
"Returns the type of key ``name``"
return self.execute_command('TYPE', name)
def watch(self, *names):
"""
Watches the values at keys ``names``
"""
warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))
def unwatch(self):
"""
Unwatches all previously WATCHed keys
"""
warnings.warn(
DeprecationWarning('Call UNWATCH from a Pipeline object'))
# LIST COMMANDS
def blpop(self, keys, timeout=0):
"""
LPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to LPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BLPOP', *keys)
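# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r``): BLPOP checks the listed keys in order, so
# 'queue:high' is always drained before 'queue:low'. The return value is
# a (key, value) pair, or None once ``timeout`` seconds pass.
#   item = r.blpop(['queue:high', 'queue:low'], timeout=5)
#   if item is not None:
#       queue_name, payload = item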
def brpop(self, keys, timeout=0):
"""
RPOP a value off of the first non-empty list
named in the ``keys`` list.
If none of the lists in ``keys`` has a value to RPOP, then block
for ``timeout`` seconds, or until a value gets pushed on to one
of the lists.
If timeout is 0, then block indefinitely.
"""
if timeout is None:
timeout = 0
if isinstance(keys, basestring):
keys = [keys]
else:
keys = list(keys)
keys.append(timeout)
return self.execute_command('BRPOP', *keys)
def brpoplpush(self, src, dst, timeout=0):
"""
Pop a value off the tail of ``src``, push it on the head of ``dst``
and then return it.
This command blocks until a value is in ``src`` or until ``timeout``
seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
forever.
"""
if timeout is None:
timeout = 0
return self.execute_command('BRPOPLPUSH', src, dst, timeout)
def lindex(self, name, index):
"""
Return the item from list ``name`` at position ``index``
Negative indexes are supported and will return an item at the
end of the list
"""
return self.execute_command('LINDEX', name, index)
def linsert(self, name, where, refvalue, value):
"""
Insert ``value`` in list ``name`` either immediately before or after
[``where``] ``refvalue``
Returns the new length of the list on success or -1 if ``refvalue``
is not in the list.
"""
return self.execute_command('LINSERT', name, where, refvalue, value)
def llen(self, name):
"Return the length of the list ``name``"
return self.execute_command('LLEN', name)
def lpop(self, name):
"Remove and return the first item of the list ``name``"
return self.execute_command('LPOP', name)
def lpush(self, name, *values):
"Push ``values`` onto the head of the list ``name``"
return self.execute_command('LPUSH', name, *values)
def lpushx(self, name, value):
"Push ``value`` onto the head of the list ``name`` if ``name`` exists"
return self.execute_command('LPUSHX', name, value)
def lrange(self, name, start, end):
"""
Return a slice of the list ``name`` between
position ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LRANGE', name, start, end)
def lrem(self, name, count, value):
"""
Remove the first ``count`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The count argument influences the operation in the following ways:
count > 0: Remove elements equal to value moving from head to tail.
count < 0: Remove elements equal to value moving from tail to head.
count = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, count, value)
def lset(self, name, index, value):
"Set ``position`` of list ``name`` to ``value``"
return self.execute_command('LSET', name, index, value)
def ltrim(self, name, start, end):
"""
Trim the list ``name``, removing all values not within the slice
between ``start`` and ``end``
``start`` and ``end`` can be negative numbers just like
Python slicing notation
"""
return self.execute_command('LTRIM', name, start, end)
def rpop(self, name):
"Remove and return the last item of the list ``name``"
return self.execute_command('RPOP', name)
def rpoplpush(self, src, dst):
"""
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
"""
return self.execute_command('RPOPLPUSH', src, dst)
def rpush(self, name, *values):
"Push ``values`` onto the tail of the list ``name``"
return self.execute_command('RPUSH', name, *values)
def rpushx(self, name, value):
"Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
return self.execute_command('RPUSHX', name, value)
def sort(self, name, start=None, num=None, by=None, get=None,
desc=False, alpha=False, store=None, groups=False):
"""
Sort and return the list, set or sorted set at ``name``.
``start`` and ``num`` allow for paging through the sorted data
``by`` allows using an external key to weight and sort the items.
Use an "*" to indicate where in the key the item value is located
``get`` allows for returning items from external keys rather than the
sorted data itself. Use an "*" to indicate where in the key
the item value is located
``desc`` allows for reversing the sort
``alpha`` allows for sorting lexicographically rather than numerically
``store`` allows for storing the result of the sort into
the key ``store``
``groups`` if set to True and if ``get`` contains at least two
elements, sort will return a list of tuples, each containing the
values fetched from the arguments to ``get``.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = [name]
if by is not None:
pieces.append(Token('BY'))
pieces.append(by)
if start is not None and num is not None:
pieces.append(Token('LIMIT'))
pieces.append(start)
pieces.append(num)
if get is not None:
# If get is a string assume we want to get a single value.
# Otherwise assume it's an iterable and we want to get multiple
# values. We can't just iterate blindly because strings are
# iterable.
if isinstance(get, basestring):
pieces.append(Token('GET'))
pieces.append(get)
else:
for g in get:
pieces.append(Token('GET'))
pieces.append(g)
if desc:
pieces.append(Token('DESC'))
if alpha:
pieces.append(Token('ALPHA'))
if store is not None:
pieces.append(Token('STORE'))
pieces.append(store)
if groups:
if not get or isinstance(get, basestring) or len(get) < 2:
raise DataError('when using "groups" the "get" argument '
'must be specified and contain at least '
'two keys')
options = {'groups': len(get) if groups else None}
return self.execute_command('SORT', *pieces, **options)
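# Illustrative usage sketch (not part of the original source; the key
# names are hypothetical, assuming a connected client ``r``): sort the
# ids in 'mylist' by external 'weight_<id>' keys and, for each id, fetch
# the element itself ('#') and its 'data_<id>' key as a tuple.
#   r.sort('mylist', by='weight_*', get=['#', 'data_*'], groups=True)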
# SCAN COMMANDS
def scan(self, cursor=0, match=None, count=None):
"""
Incrementally return lists of key names. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of keys to return per call
"""
pieces = [cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
return self.execute_command('SCAN', *pieces)
def scan_iter(self, match=None, count=None):
"""
Make an iterator using the SCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of keys to return per call
"""
cursor = '0'
while cursor != 0:
cursor, data = self.scan(cursor=cursor, match=match, count=count)
for item in data:
yield item
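# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r``): unlike keys(), this walks the keyspace
# incrementally and never blocks the server on large databases.
#   for key in r.scan_iter(match='user:*', count=100):
#       print(key)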
def sscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return lists of elements in a set. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of items to return per call
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
return self.execute_command('SSCAN', *pieces)
def sscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the SSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of items to return per call
"""
cursor = '0'
while cursor != 0:
cursor, data = self.sscan(name, cursor=cursor,
match=match, count=count)
for item in data:
yield item
def hscan(self, name, cursor=0, match=None, count=None):
"""
Incrementally return key/value slices in a hash. Also return a cursor
indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of items to return per call
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
return self.execute_command('HSCAN', *pieces)
def hscan_iter(self, name, match=None, count=None):
"""
Make an iterator using the HSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of items to return per call
"""
cursor = '0'
while cursor != 0:
cursor, data = self.hscan(name, cursor=cursor,
match=match, count=count)
for item in data.items():
yield item
def zscan(self, name, cursor=0, match=None, count=None,
score_cast_func=float):
"""
Incrementally return lists of elements in a sorted set. Also return a
cursor indicating the scan position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of items to return per call
``score_cast_func`` a callable used to cast the score return value
"""
pieces = [name, cursor]
if match is not None:
pieces.extend([Token('MATCH'), match])
if count is not None:
pieces.extend([Token('COUNT'), count])
options = {'score_cast_func': score_cast_func}
return self.execute_command('ZSCAN', *pieces, **options)
def zscan_iter(self, name, match=None, count=None,
score_cast_func=float):
"""
Make an iterator using the ZSCAN command so that the client doesn't
need to remember the cursor position.
``match`` allows for filtering the keys by pattern
``count`` hints to Redis at the number of items to return per call
``score_cast_func`` a callable used to cast the score return value
"""
cursor = '0'
while cursor != 0:
cursor, data = self.zscan(name, cursor=cursor, match=match,
count=count,
score_cast_func=score_cast_func)
for item in data:
yield item
# SET COMMANDS
def sadd(self, name, *values):
"Add ``value(s)`` to set ``name``"
return self.execute_command('SADD', name, *values)
def scard(self, name):
"Return the number of elements in set ``name``"
return self.execute_command('SCARD', name)
def sdiff(self, keys, *args):
"Return the difference of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SDIFF', *args)
def sdiffstore(self, dest, keys, *args):
"""
Store the difference of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SDIFFSTORE', dest, *args)
def sinter(self, keys, *args):
"Return the intersection of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SINTER', *args)
def sinterstore(self, dest, keys, *args):
"""
Store the intersection of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SINTERSTORE', dest, *args)
def sismember(self, name, value):
"Return a boolean indicating if ``value`` is a member of set ``name``"
return self.execute_command('SISMEMBER', name, value)
def smembers(self, name):
"Return all members of the set ``name``"
return self.execute_command('SMEMBERS', name)
def smove(self, src, dst, value):
"Move ``value`` from set ``src`` to set ``dst`` atomically"
return self.execute_command('SMOVE', src, dst, value)
def spop(self, name):
"Remove and return a random member of set ``name``"
return self.execute_command('SPOP', name)
def srandmember(self, name, number=None):
"""
If ``number`` is None, returns a random member of set ``name``.
If ``number`` is supplied, returns a list of ``number`` random
members of set ``name``. Note this is only available when running
Redis 2.6+.
"""
args = number and [number] or []
return self.execute_command('SRANDMEMBER', name, *args)
def srem(self, name, *values):
"Remove ``values`` from set ``name``"
return self.execute_command('SREM', name, *values)
def sunion(self, keys, *args):
"Return the union of sets specified by ``keys``"
args = list_or_args(keys, args)
return self.execute_command('SUNION', *args)
def sunionstore(self, dest, keys, *args):
"""
Store the union of sets specified by ``keys`` into a new
set named ``dest``. Returns the number of keys in the new set.
"""
args = list_or_args(keys, args)
return self.execute_command('SUNIONSTORE', dest, *args)
# SORTED SET COMMANDS
def zadd(self, name, *args, **kwargs):
"""
Set any number of score, element-name pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: score1, name1, score2, name2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(args)
for pair in iteritems(kwargs):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
def zcard(self, name):
"Return the number of elements in the sorted set ``name``"
return self.execute_command('ZCARD', name)
def zcount(self, name, min, max):
"""
Returns the number of elements in the sorted set at key ``name`` with
a score between ``min`` and ``max``.
"""
return self.execute_command('ZCOUNT', name, min, max)
def zincrby(self, name, value, amount=1):
"Increment the score of ``value`` in sorted set ``name`` by ``amount``"
return self.execute_command('ZINCRBY', name, amount, value)
def zinterstore(self, dest, keys, aggregate=None):
"""
Intersect multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
def zlexcount(self, name, min, max):
"""
Return the number of items in the sorted set ``name`` between the
lexicographical range ``min`` and ``max``.
"""
return self.execute_command('ZLEXCOUNT', name, min, max)
def zrange(self, name, start, end, desc=False, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in ascending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``desc`` a boolean indicating whether to sort the results descendingly
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if desc:
return self.zrevrange(name, start, end, withscores,
score_cast_func)
pieces = ['ZRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrangebylex(self, name, min, max, start=None, num=None):
"""
Return the lexicographical range of values from sorted set ``name``
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice of the
range.
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYLEX', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
return self.execute_command(*pieces)
def zrangebyscore(self, name, min, max, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max``.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZRANGEBYSCORE', name, min, max]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrank(self, name, value):
"""
Returns a 0-based value indicating the rank of ``value`` in sorted set
``name``
"""
return self.execute_command('ZRANK', name, value)
def zrem(self, name, *values):
"Remove member ``values`` from sorted set ``name``"
return self.execute_command('ZREM', name, *values)
def zremrangebylex(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` between the
lexicographical range specified by ``min`` and ``max``.
Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYLEX', name, min, max)
def zremrangebyrank(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with ranks between
``min`` and ``max``. Values are 0-based, ordered from smallest score
to largest. Values can be negative indicating the highest scores.
Returns the number of elements removed
"""
return self.execute_command('ZREMRANGEBYRANK', name, min, max)
def zremrangebyscore(self, name, min, max):
"""
Remove all elements in the sorted set ``name`` with scores
between ``min`` and ``max``. Returns the number of elements removed.
"""
return self.execute_command('ZREMRANGEBYSCORE', name, min, max)
def zrevrange(self, name, start, end, withscores=False,
score_cast_func=float):
"""
Return a range of values from sorted set ``name`` between
``start`` and ``end`` sorted in descending order.
``start`` and ``end`` can be negative, indicating the end of the range.
``withscores`` indicates to return the scores along with the values
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
pieces = ['ZREVRANGE', name, start, end]
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrangebyscore(self, name, max, min, start=None, num=None,
withscores=False, score_cast_func=float):
"""
Return a range of values from the sorted set ``name`` with scores
between ``min`` and ``max`` in descending order.
If ``start`` and ``num`` are specified, then return a slice
of the range.
``withscores`` indicates to return the scores along with the values.
The return type is a list of (value, score) pairs
``score_cast_func`` a callable used to cast the score return value
"""
if (start is not None and num is None) or \
(num is not None and start is None):
raise RedisError("``start`` and ``num`` must both be specified")
pieces = ['ZREVRANGEBYSCORE', name, max, min]
if start is not None and num is not None:
pieces.extend([Token('LIMIT'), start, num])
if withscores:
pieces.append(Token('WITHSCORES'))
options = {
'withscores': withscores,
'score_cast_func': score_cast_func
}
return self.execute_command(*pieces, **options)
def zrevrank(self, name, value):
"""
Returns a 0-based value indicating the descending rank of
``value`` in sorted set ``name``
"""
return self.execute_command('ZREVRANK', name, value)
def zscore(self, name, value):
"Return the score of element ``value`` in sorted set ``name``"
return self.execute_command('ZSCORE', name, value)
def zunionstore(self, dest, keys, aggregate=None):
"""
Union multiple sorted sets specified by ``keys`` into
a new sorted set, ``dest``. Scores in the destination will be
aggregated based on the ``aggregate``, or SUM if none is provided.
"""
return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)
def _zaggregate(self, command, dest, keys, aggregate=None):
pieces = [command, dest, len(keys)]
if isinstance(keys, dict):
keys, weights = iterkeys(keys), itervalues(keys)
else:
weights = None
pieces.extend(keys)
if weights:
pieces.append(Token('WEIGHTS'))
pieces.extend(weights)
if aggregate:
pieces.append(Token('AGGREGATE'))
pieces.append(aggregate)
return self.execute_command(*pieces)
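# Illustrative usage sketch (not part of the original source; assumes a
# connected client ``r``): passing ``keys`` as a dict attaches WEIGHTS,
# so scores from 'recent' count double before the MAX aggregation.
#   r.zunionstore('combined', {'recent': 2, 'all_time': 1},
#                 aggregate='MAX')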
# HYPERLOGLOG COMMANDS
def pfadd(self, name, *values):
"Adds the specified elements to the specified HyperLogLog."
return self.execute_command('PFADD', name, *values)
def pfcount(self, *sources):
"""
Return the approximated cardinality of
the set observed by the HyperLogLog at key(s).
"""
return self.execute_command('PFCOUNT', *sources)
def pfmerge(self, dest, *sources):
"Merge N different HyperLogLogs into a single one."
return self.execute_command('PFMERGE', dest, *sources)
# HASH COMMANDS
def hdel(self, name, *keys):
"Delete ``keys`` from hash ``name``"
return self.execute_command('HDEL', name, *keys)
def hexists(self, name, key):
"Returns a boolean indicating if ``key`` exists within hash ``name``"
return self.execute_command('HEXISTS', name, key)
def hget(self, name, key):
"Return the value of ``key`` within the hash ``name``"
return self.execute_command('HGET', name, key)
def hgetall(self, name):
"Return a Python dict of the hash's name/value pairs"
return self.execute_command('HGETALL', name)
def hincrby(self, name, key, amount=1):
"Increment the value of ``key`` in hash ``name`` by ``amount``"
return self.execute_command('HINCRBY', name, key, amount)
def hincrbyfloat(self, name, key, amount=1.0):
"""
Increment the value of ``key`` in hash ``name`` by floating ``amount``
"""
return self.execute_command('HINCRBYFLOAT', name, key, amount)
def hkeys(self, name):
"Return the list of keys within hash ``name``"
return self.execute_command('HKEYS', name)
def hlen(self, name):
"Return the number of elements in hash ``name``"
return self.execute_command('HLEN', name)
def hset(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name``
Returns 1 if HSET created a new field, otherwise 0
"""
return self.execute_command('HSET', name, key, value)
def hsetnx(self, name, key, value):
"""
Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
exist. Returns 1 if HSETNX created a field, otherwise 0.
"""
return self.execute_command('HSETNX', name, key, value)
def hmset(self, name, mapping):
"""
Set key to value within hash ``name`` for each corresponding
key and value from the ``mapping`` dict.
"""
if not mapping:
raise DataError("'hmset' with 'mapping' of length 0")
items = []
for pair in iteritems(mapping):
items.extend(pair)
return self.execute_command('HMSET', name, *items)
def hmget(self, name, keys, *args):
"Returns a list of values ordered identically to ``keys``"
args = list_or_args(keys, args)
return self.execute_command('HMGET', name, *args)
def hvals(self, name):
"Return the list of values within hash ``name``"
return self.execute_command('HVALS', name)
def publish(self, channel, message):
"""
Publish ``message`` on ``channel``.
Returns the number of subscribers the message was delivered to.
"""
return self.execute_command('PUBLISH', channel, message)
def eval(self, script, numkeys, *keys_and_args):
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVAL', script, numkeys, *keys_and_args)
def evalsha(self, sha, numkeys, *keys_and_args):
"""
Use the ``sha`` to execute a Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
"""
return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args)
def script_exists(self, *args):
"""
Check if a script exists in the script cache by specifying the SHAs of
each script as ``args``. Returns a list of boolean values indicating
whether each script already exists in the cache.
"""
return self.execute_command('SCRIPT EXISTS', *args)
def script_flush(self):
"Flush all scripts from the script cache"
return self.execute_command('SCRIPT FLUSH')
def script_kill(self):
"Kill the currently executing Lua script"
return self.execute_command('SCRIPT KILL')
def script_load(self, script):
"Load a Lua ``script`` into the script cache. Returns the SHA."
return self.execute_command('SCRIPT LOAD', script)
def register_script(self, script):
"""
Register a Lua ``script`` specifying the ``keys`` it will touch.
Returns a Script object that is callable and hides the complexity of
dealing with scripts, keys, and SHAs. This is the preferred way to work
with Lua scripts.
"""
return Script(self, script)
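# Illustrative usage sketch (not part of the original source; the script
# is hypothetical, assuming a connected client ``r``): the returned
# Script object transparently loads the script and calls EVALSHA.
#   incr_by = r.register_script(
#       "return redis.call('INCRBY', KEYS[1], ARGV[1])")
#   incr_by(keys=['counter'], args=[5])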
class Redis(StrictRedis):
"""
Provides backwards compatibility with older versions of redis-py that
changed arguments to some commands to be more Pythonic, sane, or by
accident.
"""
# Overridden callbacks
RESPONSE_CALLBACKS = dict_merge(
StrictRedis.RESPONSE_CALLBACKS,
{
'TTL': lambda r: r >= 0 and r or None,
'PTTL': lambda r: r >= 0 and r or None,
}
)
def pipeline(self, transaction=True, shard_hint=None):
"""
Return a new pipeline object that can queue multiple commands for
later execution. ``transaction`` indicates whether all commands
should be executed atomically. Apart from making a group of operations
atomic, pipelines are useful for reducing the back-and-forth overhead
between the client and server.
"""
return Pipeline(
self.connection_pool,
self.response_callbacks,
transaction,
shard_hint)
def setex(self, name, value, time):
"""
Set the value of key ``name`` to ``value`` that expires in ``time``
seconds. ``time`` can be represented by an integer or a Python
timedelta object.
"""
if isinstance(time, datetime.timedelta):
time = time.seconds + time.days * 24 * 3600
return self.execute_command('SETEX', name, time, value)
def lrem(self, name, value, num=0):
"""
Remove the first ``num`` occurrences of elements equal to ``value``
from the list stored at ``name``.
The ``num`` argument influences the operation in the following ways:
num > 0: Remove elements equal to value moving from head to tail.
num < 0: Remove elements equal to value moving from tail to head.
num = 0: Remove all elements equal to value.
"""
return self.execute_command('LREM', name, num, value)
def zadd(self, name, *args, **kwargs):
"""
NOTE: The order of arguments differs from that of the official ZADD
command. For backwards compatibility, this method accepts arguments
in the form of name1, score1, name2, score2, while the official Redis
documentation expects score1, name1, score2, name2.
If you're looking to use the standard syntax, consider using the
StrictRedis class. See the API Reference section of the docs for more
information.
Set any number of element-name, score pairs to the key ``name``. Pairs
can be specified in two ways:
As *args, in the form of: name1, score1, name2, score2, ...
or as **kwargs, in the form of: name1=score1, name2=score2, ...
The following example would add four values to the 'my-key' key:
redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
"""
pieces = []
if args:
if len(args) % 2 != 0:
raise RedisError("ZADD requires an equal number of "
"values and scores")
pieces.extend(reversed(args))
for pair in iteritems(kwargs):
pieces.append(pair[1])
pieces.append(pair[0])
return self.execute_command('ZADD', name, *pieces)
class PubSub(object):
"""
PubSub provides publish, subscribe and listen support to Redis channels.
After subscribing to one or more channels, the listen() method will block
until a message arrives on one of the subscribed channels. That message
will be returned and it's safe to start listening again.
"""
PUBLISH_MESSAGE_TYPES = ('message', 'pmessage')
UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe')
def __init__(self, connection_pool, shard_hint=None,
ignore_subscribe_messages=False):
self.connection_pool = connection_pool
self.shard_hint = shard_hint
self.ignore_subscribe_messages = ignore_subscribe_messages
self.connection = None
# we need to know the encoding options for this connection in order
# to lookup channel and pattern names for callback handlers.
conn = connection_pool.get_connection('pubsub', shard_hint)
try:
self.encoding = conn.encoding
self.encoding_errors = conn.encoding_errors
self.decode_responses = conn.decode_responses
finally:
connection_pool.release(conn)
self.reset()
def __del__(self):
try:
# if this object went out of scope prior to shutting down
# subscriptions, close the connection manually before
# returning it to the connection pool
self.reset()
except Exception:
pass
def reset(self):
if self.connection:
self.connection.disconnect()
self.connection.clear_connect_callbacks()
self.connection_pool.release(self.connection)
self.connection = None
self.channels = {}
self.patterns = {}
def close(self):
self.reset()
def on_connect(self, connection):
"Re-subscribe to any channels and patterns previously subscribed to"
# NOTE: for python3, we can't pass bytestrings as keyword arguments
# so we need to decode channel/pattern names back to unicode strings
# before passing them to [p]subscribe.
if self.channels:
channels = {}
for k, v in iteritems(self.channels):
if not self.decode_responses:
k = k.decode(self.encoding, self.encoding_errors)
channels[k] = v
self.subscribe(**channels)
if self.patterns:
patterns = {}
for k, v in iteritems(self.patterns):
if not self.decode_responses:
k = k.decode(self.encoding, self.encoding_errors)
patterns[k] = v
self.psubscribe(**patterns)
def encode(self, value):
"""
Encode the value so that it's identical to what we'll
read off the connection
"""
if self.decode_responses and isinstance(value, bytes):
value = value.decode(self.encoding, self.encoding_errors)
elif not self.decode_responses and isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
@property
def subscribed(self):
"Indicates if there are subscriptions to any channels or patterns"
return bool(self.channels or self.patterns)
def execute_command(self, *args, **kwargs):
"Execute a publish/subscribe command"
# NOTE: don't parse the response in this function. it could pull a
# legitimate message off the stack if the connection is already
# subscribed to one or more channels
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'pubsub',
self.shard_hint
)
# register a callback that re-subscribes to any channels we
# were listening to when we were disconnected
self.connection.register_connect_callback(self.on_connect)
connection = self.connection
self._execute(connection, connection.send_command, *args)
def _execute(self, connection, command, *args):
try:
return command(*args)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not connection.retry_on_timeout and isinstance(e, TimeoutError):
raise
# Connect manually here. If the Redis server is down, this will
# fail and raise a ConnectionError as desired.
connection.connect()
# the ``on_connect`` callback should have been called by the
# connection to resubscribe us to any channels and patterns we were
# previously listening to
return command(*args)
def parse_response(self, block=True, timeout=0):
"Parse the response from a publish/subscribe command"
connection = self.connection
if not block and not connection.can_read(timeout=timeout):
return None
return self._execute(connection, connection.read_response)
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = {}
new_patterns.update(dict.fromkeys(imap(self.encode, args)))
for pattern, handler in iteritems(kwargs):
new_patterns[self.encode(pattern)] = handler
ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
self.patterns.update(new_patterns)
return ret_val
def punsubscribe(self, *args):
"""
Unsubscribe from the supplied patterns. If empty, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:])
return self.execute_command('PUNSUBSCRIBE', *args)
def subscribe(self, *args, **kwargs):
"""
Subscribe to channels. Channels supplied as keyword arguments expect
a channel name as the key and a callable as the value. A channel's
callable will be invoked automatically when a message is received on
that channel rather than producing a message via ``listen()`` or
``get_message()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_channels = {}
new_channels.update(dict.fromkeys(imap(self.encode, args)))
for channel, handler in iteritems(kwargs):
new_channels[self.encode(channel)] = handler
ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels))
# update the channels dict AFTER we send the command. we don't want to
# subscribe twice to these channels, once for the command and again
# for the reconnection.
self.channels.update(new_channels)
return ret_val
def unsubscribe(self, *args):
"""
Unsubscribe from the supplied channels. If empty, unsubscribe from
all channels
"""
if args:
args = list_or_args(args[0], args[1:])
return self.execute_command('UNSUBSCRIBE', *args)
def listen(self):
"Listen for messages on channels this client has been subscribed to"
while self.subscribed:
response = self.handle_message(self.parse_response(block=True))
if response is not None:
yield response
def get_message(self, ignore_subscribe_messages=False, timeout=0):
"""
Get the next message if one is available, otherwise None.
If timeout is specified, the system will wait for `timeout` seconds
before returning. Timeout should be specified as a floating point
number.
"""
response = self.parse_response(block=False, timeout=timeout)
if response:
return self.handle_message(response, ignore_subscribe_messages)
return None
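# Illustrative polling sketch (not part of the original source; assumes
# a PubSub object obtained from a connected client, e.g. via
# ``p = r.pubsub(ignore_subscribe_messages=True)`` as in redis-py):
#   p.subscribe('news')
#   while True:
#       message = p.get_message(timeout=1.0)
#       if message is not None:
#           print(message['channel'], message['data'])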
def handle_message(self, response, ignore_subscribe_messages=False):
"""
Parses a pub/sub message. If the channel or pattern was subscribed to
with a message handler, the handler is invoked instead of a parsed
message being returned.
"""
message_type = nativestr(response[0])
if message_type == 'pmessage':
message = {
'type': message_type,
'pattern': response[1],
'channel': response[2],
'data': response[3]
}
else:
message = {
'type': message_type,
'pattern': None,
'channel': response[1],
'data': response[2]
}
# if this is an unsubscribe message, remove it from memory
if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES:
subscribed_dict = None
if message_type == 'punsubscribe':
subscribed_dict = self.patterns
else:
subscribed_dict = self.channels
try:
del subscribed_dict[message['channel']]
except KeyError:
pass
if message_type in self.PUBLISH_MESSAGE_TYPES:
# if there's a message handler, invoke it
handler = None
if message_type == 'pmessage':
handler = self.patterns.get(message['pattern'], None)
else:
handler = self.channels.get(message['channel'], None)
if handler:
handler(message)
return None
else:
# this is a subscribe/unsubscribe message. ignore if we don't
# want them
if ignore_subscribe_messages or self.ignore_subscribe_messages:
return None
return message
def run_in_thread(self, sleep_time=0):
for channel, handler in iteritems(self.channels):
if handler is None:
raise PubSubError("Channel: '%s' has no handler registered")
for pattern, handler in iteritems(self.patterns):
if handler is None:
raise PubSubError("Pattern: '%s' has no handler registered")
thread = PubSubWorkerThread(self, sleep_time)
thread.start()
return thread
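# Illustrative usage sketch (not part of the original source; assumes a
# PubSub object ``p``): run_in_thread() requires every subscription to
# carry a handler, since messages are consumed in the worker thread.
#   def news_handler(message):
#       print(message['data'])
#   p.subscribe(news=news_handler)
#   worker = p.run_in_thread(sleep_time=0.1)
#   # ... later, unsubscribe everything and let the thread exit:
#   worker.stop()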
class PubSubWorkerThread(threading.Thread):
def __init__(self, pubsub, sleep_time):
super(PubSubWorkerThread, self).__init__()
self.pubsub = pubsub
self.sleep_time = sleep_time
self._running = False
def run(self):
if self._running:
return
self._running = True
pubsub = self.pubsub
sleep_time = self.sleep_time
while pubsub.subscribed:
pubsub.get_message(ignore_subscribe_messages=True,
timeout=sleep_time)
pubsub.close()
self._running = False
def stop(self):
# stopping simply unsubscribes from all channels and patterns.
# the unsubscribe responses that are generated will short circuit
# the loop in run(), calling pubsub.close() to clean up the connection
self.pubsub.unsubscribe()
self.pubsub.punsubscribe()
class BasePipeline(object):
"""
Pipelines provide a way to transmit multiple commands to the Redis server
in one transmission. This is convenient for batch processing, such as
saving all the values in a list to Redis.
All commands executed within a pipeline are wrapped with MULTI and EXEC
calls. This guarantees all commands executed in the pipeline will be
executed atomically.
Any command raising an exception does *not* halt the execution of
subsequent commands in the pipeline. Instead, the exception is caught
and its instance is placed into the response list returned by execute().
Code iterating over the response list should be able to deal with an
instance of an exception as a potential value. In general, these will be
ResponseError exceptions, such as those raised when issuing a command
on a key of a different datatype.
"""
UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH'))
def __init__(self, connection_pool, response_callbacks, transaction,
shard_hint):
self.connection_pool = connection_pool
self.connection = None
self.response_callbacks = response_callbacks
self.transaction = transaction
self.shard_hint = shard_hint
self.watching = False
self.reset()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.reset()
def __del__(self):
try:
self.reset()
except Exception:
pass
def __len__(self):
return len(self.command_stack)
def reset(self):
self.command_stack = []
self.scripts = set()
# make sure to reset the connection state in the event that we were
# watching something
if self.watching and self.connection:
try:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
self.connection.send_command('UNWATCH')
self.connection.read_response()
except ConnectionError:
# disconnect will also remove any previous WATCHes
self.connection.disconnect()
# clean up the other instance attributes
self.watching = False
self.explicit_transaction = False
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def multi(self):
"""
Start a transactional block of the pipeline after WATCH commands
are issued. End the transactional block with `execute`.
"""
if self.explicit_transaction:
raise RedisError('Cannot issue nested calls to MULTI')
if self.command_stack:
raise RedisError('Commands without an initial WATCH have already '
'been issued')
self.explicit_transaction = True
def execute_command(self, *args, **kwargs):
if (self.watching or args[0] == 'WATCH') and \
not self.explicit_transaction:
return self.immediate_execute_command(*args, **kwargs)
return self.pipeline_execute_command(*args, **kwargs)
def immediate_execute_command(self, *args, **options):
"""
Execute a command immediately, but don't auto-retry on a
ConnectionError if we're already WATCHing a variable. Used when
issuing WATCH, or subsequent commands that retrieve their values, before
MULTI is called.
"""
command_name = args[0]
conn = self.connection
# if this is the first call, we need a connection
if not conn:
conn = self.connection_pool.get_connection(command_name,
self.shard_hint)
self.connection = conn
try:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not conn.retry_on_timeout and isinstance(e, TimeoutError):
raise
# if we're not already watching, we can safely retry the command
try:
if not self.watching:
conn.send_command(*args)
return self.parse_response(conn, command_name, **options)
except ConnectionError:
# the retry failed so cleanup.
conn.disconnect()
self.reset()
raise
def pipeline_execute_command(self, *args, **options):
"""
Stage a command to be executed when execute() is next called
Returns the current Pipeline object back so commands can be
chained together, such as:
pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')
At some other point, you can then run: pipe.execute(),
which will execute all commands queued in the pipe.
"""
self.command_stack.append((args, options))
return self
def _execute_transaction(self, connection, commands, raise_on_error):
cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})])
all_cmds = connection.pack_commands([args for args, _ in cmds])
connection.send_packed_command(all_cmds)
errors = []
# parse off the response for MULTI
# NOTE: we need to handle ResponseErrors here and continue
# so that we read all the additional command messages from
# the socket
try:
self.parse_response(connection, '_')
except ResponseError:
errors.append((0, sys.exc_info()[1]))
# and all the other commands
for i, command in enumerate(commands):
try:
self.parse_response(connection, '_')
except ResponseError:
ex = sys.exc_info()[1]
self.annotate_exception(ex, i + 1, command[0])
errors.append((i, ex))
# parse the EXEC.
try:
response = self.parse_response(connection, '_')
except ExecAbortError:
if self.explicit_transaction:
self.immediate_execute_command('DISCARD')
if errors:
raise errors[0][1]
raise sys.exc_info()[1]
if response is None:
raise WatchError("Watched variable changed.")
# put any parse errors into the response
for i, e in errors:
response.insert(i, e)
if len(response) != len(commands):
self.connection.disconnect()
raise ResponseError("Wrong number of response items from "
"pipeline execution")
# find any errors in the response and raise if necessary
if raise_on_error:
self.raise_first_error(commands, response)
# We have to run response callbacks manually
data = []
for r, cmd in izip(response, commands):
if not isinstance(r, Exception):
args, options = cmd
command_name = args[0]
if command_name in self.response_callbacks:
r = self.response_callbacks[command_name](r, **options)
data.append(r)
return data
def _execute_pipeline(self, connection, commands, raise_on_error):
# build up all commands into a single request to increase network perf
all_cmds = connection.pack_commands([args for args, _ in commands])
connection.send_packed_command(all_cmds)
response = []
for args, options in commands:
try:
response.append(
self.parse_response(connection, args[0], **options))
except ResponseError:
response.append(sys.exc_info()[1])
if raise_on_error:
self.raise_first_error(commands, response)
return response
def raise_first_error(self, commands, response):
for i, r in enumerate(response):
if isinstance(r, ResponseError):
self.annotate_exception(r, i + 1, commands[i][0])
raise r
def annotate_exception(self, exception, number, command):
cmd = safe_unicode(' ').join(imap(safe_unicode, command))
msg = unicode('Command # %d (%s) of pipeline caused error: %s') % (
number, cmd, safe_unicode(exception.args[0]))
exception.args = (msg,) + exception.args[1:]
def parse_response(self, connection, command_name, **options):
result = StrictRedis.parse_response(
self, connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self.watching = False
elif command_name == 'WATCH':
self.watching = True
return result
def load_scripts(self):
# make sure all scripts that are about to be run on this pipeline exist
scripts = list(self.scripts)
immediate = self.immediate_execute_command
shas = [s.sha for s in scripts]
# we can't use the normal script_* methods because they would just
# get buffered in the pipeline.
exists = immediate('SCRIPT', 'EXISTS', *shas, **{'parse': 'EXISTS'})
if not all(exists):
for s, exist in izip(scripts, exists):
if not exist:
s.sha = immediate('SCRIPT', 'LOAD', s.script,
**{'parse': 'LOAD'})
def execute(self, raise_on_error=True):
"Execute all the commands in the current pipeline"
stack = self.command_stack
if not stack:
return []
if self.scripts:
self.load_scripts()
if self.transaction or self.explicit_transaction:
execute = self._execute_transaction
else:
execute = self._execute_pipeline
conn = self.connection
if not conn:
conn = self.connection_pool.get_connection('MULTI',
self.shard_hint)
# assign to self.connection so reset() releases the connection
# back to the pool after we're done
self.connection = conn
try:
return execute(conn, stack, raise_on_error)
except (ConnectionError, TimeoutError) as e:
conn.disconnect()
if not conn.retry_on_timeout and isinstance(e, TimeoutError):
raise
# if we were watching a variable, the watch is no longer valid
# since this connection has died. raise a WatchError, which
# indicates the user should retry the transaction. If this is more
# than a temporary failure, the WATCH that the user next issues
# will fail, propagating the real ConnectionError
if self.watching:
raise WatchError("A ConnectionError occurred while watching "
"one or more keys")
# otherwise, it's safe to retry since the transaction isn't
# predicated on any state
return execute(conn, stack, raise_on_error)
finally:
self.reset()
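# Illustrative optimistic-locking sketch (not part of the original
# source; assumes a connected client ``r``): WATCH a key, read it, then
# MULTI/EXEC; if another client changed 'counter' in between, execute()
# raises WatchError and the loop retries.
#   with r.pipeline() as pipe:
#       while True:
#           try:
#               pipe.watch('counter')
#               current = int(pipe.get('counter') or 0)
#               pipe.multi()
#               pipe.set('counter', current + 1)
#               pipe.execute()
#               break
#           except WatchError:
#               continue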
def watch(self, *names):
"Watches the values at keys ``names``"
if self.explicit_transaction:
raise RedisError('Cannot issue a WATCH after a MULTI')
return self.execute_command('WATCH', *names)
def unwatch(self):
"Unwatches all previously specified keys"
return self.watching and self.execute_command('UNWATCH') or True
def script_load_for_pipeline(self, script):
"Make sure scripts are loaded prior to pipeline execution"
# we need the sha now so that Script.__call__ can use it to run
# evalsha.
if not script.sha:
script.sha = self.immediate_execute_command('SCRIPT', 'LOAD',
script.script,
**{'parse': 'LOAD'})
self.scripts.add(script)
class StrictPipeline(BasePipeline, StrictRedis):
"Pipeline for the StrictRedis class"
pass
class Pipeline(BasePipeline, Redis):
"Pipeline for the Redis class"
pass
class Script(object):
"An executable Lua script object returned by ``register_script``"
def __init__(self, registered_client, script):
self.registered_client = registered_client
self.script = script
self.sha = ''
def __call__(self, keys=[], args=[], client=None):
"Execute the script, passing any required ``args``"
if client is None:
client = self.registered_client
args = tuple(keys) + tuple(args)
# make sure the Redis server knows about the script
if isinstance(client, BasePipeline):
# make sure this script is good to go on pipeline
client.script_load_for_pipeline(self)
try:
return client.evalsha(self.sha, len(keys), *args)
except NoScriptError:
            # Maybe the client is pointed to a different server than the client
# that created this instance?
self.sha = client.script_load(self.script)
return client.evalsha(self.sha, len(keys), *args)
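# Added usage sketch (not part of redis-py): Script objects are normally
# created via register_script(); the server address and key name here are
# hypothetical.
#
#   r = StrictRedis(host='localhost', port=6379)
#   incrby = r.register_script("return redis.call('INCRBY', KEYS[1], ARGV[1])")
#   incrby(keys=['hits'], args=[1])  # EVALSHA, reloading on NoScriptError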
|
mit
| -1,483,680,038,123,056,600 | -7,361,766,989,422,627,000 | 36.68097 | 79 | 0.590662 | false |
slagle/ansible-modules-extras
|
packaging/os/yum_repository.py
|
8
|
24683
|
#!/usr/bin/python
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import os
DOCUMENTATION = '''
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add and remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
options:
async:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- If set to C(yes) Yum will download packages and metadata from this
repo in parallel, if possible.
bandwidth:
required: false
default: 0
description:
- Maximum available network bandwidth in bytes/second. Used with the
I(throttle) option.
- If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
baseurl:
required: false
default: null
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- This or the I(mirrorlist) parameter is required if I(state) is set to
C(present).
cost:
required: false
default: 1000
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
deltarpm_metadata_percentage:
required: false
default: 100
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
deltarpm_percentage:
required: false
default: 75
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
description:
required: false
default: null
description:
- A human readable string describing the repository.
- This parameter is only required if I(state) is set to C(present).
enabled:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
      - This tells yum whether or not to use this repository.
enablegroups:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Determines whether yum will allow the use of package groups for this
repository.
exclude:
required: false
default: null
description:
- List of packages to exclude from updates or installs. This should be a
        space separated list. Shell globs using wildcards (e.g. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
failovermethod:
required: false
choices: [roundrobin, priority]
default: roundrobin
description:
- C(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
file:
required: false
default: null
description:
- File to use to save the repo in. Defaults to the value of I(name).
gpgcakey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
gpgkey:
required: false
default: null
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
http_caching:
required: false
choices: [all, packages, none]
default: all
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- C(all) means that all HTTP downloads should be cached.
- C(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
include:
required: false
default: null
description:
      - Include an external configuration file. Both local paths and URLs are
        supported. The configuration file will be inserted at the position of
        the I(include=) line. Included files may contain further include lines.
Yum will abort with an error if an inclusion loop is detected.
includepkgs:
required: false
default: null
description:
- List of packages you want to only use from a repository. This should be
        a space separated list. Shell globs using wildcards (e.g. C(*) and C(?))
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
ip_resolve:
required: false
choices: [4, 6, IPv4, IPv6, whatever]
default: whatever
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
keepalive:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
keepcache:
required: false
choices: ['0', '1']
default: '1'
description:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
metadata_expire:
required: false
default: 21600
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
metadata_expire_filter:
required: false
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
default: 'read-only:present'
description:
- Filter the I(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
        repos, from "I'm about to install/upgrade, so this better be current"
to "Anything that's available is good enough".
- C(never) - Nothing is filtered, always obey I(metadata_expire).
- C(read-only:past) - Commands that only care about past information are
        filtered from metadata expiring. E.g. I(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- C(read-only:present) - Commands that are balanced between past and
        future. E.g. I(yum list yum).
- C(read-only:future) - Commands that are likely to result in running
        other commands which will require the latest metadata. E.g.
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
metalink:
required: false
default: null
description:
      - Specifies a URL to a metalink file for the repomd.xml; a list of
        mirrors for the entire repository is generated by converting the
        mirrors for the repomd.xml file to a I(baseurl).
mirrorlist:
required: false
default: null
description:
- Specifies a URL to a file containing a list of baseurls.
- This or the I(baseurl) parameter is required if I(state) is set to
C(present).
mirrorlist_expire:
required: false
default: 21600
description:
      - Time (in seconds) after which the locally cached mirrorlist will
        expire.
- Default value is 6 hours.
name:
required: true
description:
- Unique repository ID.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
params:
required: false
default: null
description:
- Option used to allow the user to overwrite any of the other options.
To remove an option, set the value of the option to C(null).
password:
required: false
default: null
description:
- Password to use with the username for basic authentication.
priority:
required: false
default: 99
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
protect:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Protect packages from updates from other repositories.
proxy:
required: false
default: null
description:
- URL to the proxy server that yum should use.
proxy_password:
required: false
default: null
description:
      - Password to use for the proxy.
proxy_username:
required: false
default: null
description:
      - Username to use for the proxy.
repo_gpgcheck:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
reposdir:
required: false
default: /etc/yum.repos.d
description:
- Directory where the C(.repo) files will be stored.
retries:
required: false
default: 10
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
s3_enabled:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
skip_if_unavailable:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If set to C(yes) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
ssl_check_cert_permissions:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
I(skip_if_unavailable) to be C(yes). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
sslcacert:
required: false
default: null
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
sslclientcert:
required: false
default: null
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
sslclientkey:
required: false
default: null
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
sslverify:
required: false
choices: ['yes', 'no']
default: 'yes'
description:
- Defines whether yum should verify SSL certificates/hosts at all.
state:
required: false
choices: [absent, present]
default: present
description:
- State of the repo file.
throttle:
required: false
default: null
description:
- Enable bandwidth throttling for downloads.
      - This option can be expressed as an absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
timeout:
required: false
default: 30
description:
- Number of seconds to wait for a connection before timing out.
ui_repoid_vars:
required: false
default: releasever basearch
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
username:
required: false
default: null
description:
- Username to use for basic authentication to a repo or really any url.
extends_documentation_fragment:
- files
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
'''
EXAMPLES = '''
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
- name: Remove repository
yum_repository:
name: epel
state: absent
- name: Remove repository from a specific repo file
yum_repository:
name: epel
file: external_repos
state: absent
#
# Allow to overwrite the yum_repository parameters by defining the parameters
# as a variable in the defaults or vars file:
#
# my_role_somerepo_params:
# # Disable GPG checking
# gpgcheck: no
# # Remove the gpgkey option
# gpgkey: null
#
- name: Add Some repo
yum_repository:
name: somerepo
description: Some YUM repo
baseurl: http://server.com/path/to/the/repo
gpgkey: http://server.com/keys/somerepo.pub
gpgcheck: yes
params: "{{ my_role_somerepo_params }}"
'''
RETURN = '''
repo:
description: repository name
returned: success
type: string
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
class YumRepo(object):
# Class global variables
module = None
params = None
section = None
repofile = ConfigParser.RawConfigParser()
# List of parameters which will be allowed in the repo file output
allowed_params = [
'async',
'bandwidth',
'baseurl',
'cost',
'deltarpm_metadata_percentage',
'deltarpm_percentage',
'enabled',
'enablegroups',
'exclude',
'failovermethod',
'gpgcakey',
'gpgcheck',
'gpgkey',
'http_caching',
'include',
'includepkgs',
'ip_resolve',
'keepalive',
'keepcache',
'metadata_expire',
'metadata_expire_filter',
'metalink',
'mirrorlist',
'mirrorlist_expire',
'name',
'password',
'priority',
'protect',
'proxy',
'proxy_password',
'proxy_username',
'repo_gpgcheck',
'retries',
's3_enabled',
'skip_if_unavailable',
'sslcacert',
'ssl_check_cert_permissions',
'sslclientcert',
'sslclientkey',
'sslverify',
'throttle',
'timeout',
'ui_repoid_vars',
'username']
# List of parameters which can be a list
list_params = ['exclude', 'includepkgs']
def __init__(self, module):
# To be able to use fail_json
self.module = module
# Shortcut for the params
self.params = self.module.params
# Section is always the repoid
self.section = self.params['repoid']
# Check if repo directory exists
repos_dir = self.params['reposdir']
if not os.path.isdir(repos_dir):
self.module.fail_json(
msg="Repo directory '%s' does not exist." % repos_dir)
# Set dest; also used to set dest parameter for the FS attributes
self.params['dest'] = os.path.join(
repos_dir, "%s.repo" % self.params['file'])
# Read the repo file if it exists
if os.path.isfile(self.params['dest']):
self.repofile.read(self.params['dest'])
def add(self):
# Remove already existing repo and create a new one
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
# Add section
self.repofile.add_section(self.section)
# Baseurl/mirrorlist is not required because for removal we need only
# the repo name. This is why we check if the baseurl/mirrorlist is
# defined.
if (self.params['baseurl'], self.params['mirrorlist']) == (None, None):
self.module.fail_json(
                msg='Parameter "baseurl" or "mirrorlist" is required for '
'adding a new repo.')
# Set options
for key, value in sorted(self.params.items()):
if key in self.list_params and isinstance(value, list):
# Join items into one string for specific parameters
value = ' '.join(value)
elif isinstance(value, bool):
# Convert boolean value to integer
value = int(value)
# Set the value only if it was defined (default is None)
if value is not None and key in self.allowed_params:
self.repofile.set(self.section, key, value)
def save(self):
if len(self.repofile.sections()):
# Write data into the file
try:
fd = open(self.params['dest'], 'wb')
except IOError, e:
self.module.fail_json(
msg="Cannot open repo file %s." % self.params['dest'],
details=str(e))
self.repofile.write(fd)
try:
fd.close()
except IOError, e:
self.module.fail_json(
msg="Cannot write repo file %s." % self.params['dest'],
details=str(e))
else:
            # Remove the file if there are no repos
try:
os.remove(self.params['dest'])
except OSError, e:
self.module.fail_json(
msg=(
"Cannot remove empty repo file %s." %
self.params['dest']),
details=str(e))
def remove(self):
# Remove section if exists
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
def dump(self):
repo_string = ""
# Compose the repo file
for section in sorted(self.repofile.sections()):
repo_string += "[%s]\n" % section
for key, value in sorted(self.repofile.items(section)):
repo_string += "%s = %s\n" % (key, value)
repo_string += "\n"
return repo_string
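    # Illustration (added, not part of the module): for a hypothetical 'epel'
    # repo, dump() renders each section in INI form with alphabetically
    # ordered keys, e.g.:
    #
    #   [epel]
    #   baseurl = http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
    #   name = EPEL YUM repo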
def main():
# Module settings
module = AnsibleModule(
argument_spec=dict(
async=dict(type='bool'),
bandwidth=dict(),
baseurl=dict(),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(),
gpgcheck=dict(type='bool'),
gpgkey=dict(),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(required=True),
params=dict(),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(),
sslclientkey=dict(),
sslverify=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
),
add_file_common_args=True,
supports_check_mode=True,
)
# Update module parameters by user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
yumrepo_before = yumrepo.dump()
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
yumrepo_after = yumrepo.dump()
# Compare repo states
changed = yumrepo_before != yumrepo_after
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state)
# Import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
| -903,027,895,084,398,100 | 2,949,402,719,941,543,400 | 31.954606 | 79 | 0.626828 | false |
wfx/epack
|
epack/libarchive/ffi.py
|
1
|
7623
|
# This file is part of a program licensed under the terms of the GNU Lesser
# General Public License version 2 (or at your option any later version)
# as published by the Free Software Foundation: http://www.gnu.org/licenses/
from __future__ import division, print_function, unicode_literals
from ctypes import (
c_char_p, c_int, c_uint, c_longlong, c_size_t, c_void_p,
c_wchar_p, CFUNCTYPE, POINTER,
)
try:
from ctypes import c_ssize_t
except ImportError:
from ctypes import c_longlong as c_ssize_t
import ctypes
from ctypes.util import find_library
import logging
import mmap
import os
from .exception import ArchiveError
logger = logging.getLogger('libarchive')
page_size = mmap.PAGESIZE
libarchive_path = os.environ.get('LIBARCHIVE') or \
find_library('archive') or \
find_library('libarchive') or \
'libarchive.so'
libarchive = ctypes.cdll.LoadLibrary(libarchive_path)
# Constants
ARCHIVE_EOF = 1 # Found end of archive.
ARCHIVE_OK = 0 # Operation was successful.
ARCHIVE_RETRY = -10 # Retry might succeed.
ARCHIVE_WARN = -20 # Partial success.
ARCHIVE_FAILED = -25 # Current operation cannot complete.
ARCHIVE_FATAL = -30 # No more operations are possible.
AE_IFMT = 0o170000
AE_IFREG = 0o100000
AE_IFLNK = 0o120000
AE_IFSOCK = 0o140000
AE_IFCHR = 0o020000
AE_IFBLK = 0o060000
AE_IFDIR = 0o040000
AE_IFIFO = 0o010000
# Callback types
WRITE_CALLBACK = CFUNCTYPE(
c_ssize_t, c_void_p, c_void_p, POINTER(c_void_p), c_size_t
)
OPEN_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
CLOSE_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
VOID_CB = lambda *_: ARCHIVE_OK
# Type aliases, for readability
c_archive_p = c_void_p
c_archive_entry_p = c_void_p
# Helper functions
def _error_string(archive_p):
msg = error_string(archive_p)
if msg is None:
return
try:
return msg.decode('ascii')
except UnicodeDecodeError:
return msg
def archive_error(archive_p, retcode):
msg = _error_string(archive_p)
raise ArchiveError(msg, errno(archive_p), retcode, archive_p)
def check_null(ret, func, args):
if ret is None:
raise ArchiveError(func.__name__+' returned NULL')
return ret
def check_int(retcode, func, args):
if retcode >= 0:
return retcode
elif retcode == ARCHIVE_WARN:
logger.warning(_error_string(args[0]))
return retcode
else:
raise archive_error(args[0], retcode)
def ffi(name, argtypes, restype, errcheck=None):
f = getattr(libarchive, 'archive_'+name)
f.argtypes = argtypes
f.restype = restype
if errcheck:
f.errcheck = errcheck
globals()[name] = f
return f
# FFI declarations
# archive_util
errno = ffi('errno', [c_archive_p], c_int)
error_string = ffi('error_string', [c_archive_p], c_char_p)
# archive_entry
ffi('entry_new', [], c_archive_entry_p, check_null)
ffi('entry_filetype', [c_archive_entry_p], c_int)
ffi('entry_mtime', [c_archive_entry_p], c_int)
ffi('entry_perm', [c_archive_entry_p], c_int)
ffi('entry_pathname_w', [c_archive_entry_p], c_wchar_p)
ffi('entry_sourcepath', [c_archive_entry_p], c_char_p)
ffi('entry_size', [c_archive_entry_p], c_longlong)
ffi('entry_size_is_set', [c_archive_entry_p], c_int)
ffi('entry_update_pathname_utf8', [c_archive_entry_p, c_char_p], None)
ffi('entry_clear', [c_archive_entry_p], c_archive_entry_p)
ffi('entry_free', [c_archive_entry_p], None)
# archive_read
ffi('read_new', [], c_archive_p, check_null)
READ_FORMATS = set((
'7zip', 'all', 'ar', 'cab', 'cpio', 'empty', 'iso9660', 'lha', 'mtree',
'rar', 'raw', 'tar', 'xar', 'zip'
))
for f_name in list(READ_FORMATS):
try:
ffi('read_support_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read format "%s" is not supported' % f_name)
READ_FORMATS.remove(f_name)
READ_FILTERS = set((
'all', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'none', 'rpm', 'uu', 'xz'
))
for f_name in list(READ_FILTERS):
try:
ffi('read_support_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read filter "%s" is not supported' % f_name)
READ_FILTERS.remove(f_name)
ffi('read_open_fd', [c_archive_p, c_int, c_size_t], c_int, check_int)
ffi('read_open_filename_w', [c_archive_p, c_wchar_p, c_size_t],
c_int, check_int)
ffi('read_open_memory', [c_archive_p, c_void_p, c_size_t], c_int, check_int)
ffi('read_next_header', [c_archive_p, POINTER(c_void_p)], c_int, check_int)
ffi('read_next_header2', [c_archive_p, c_void_p], c_int, check_int)
ffi('read_close', [c_archive_p], c_int, check_int)
ffi('read_free', [c_archive_p], c_int, check_int)
# archive_read_disk
ffi('read_disk_new', [], c_archive_p, check_null)
ffi('read_disk_set_standard_lookup', [c_archive_p], c_int, check_int)
ffi('read_disk_open', [c_archive_p, c_char_p], c_int, check_int)
ffi('read_disk_open_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('read_disk_descend', [c_archive_p], c_int, check_int)
# archive_read_data
ffi('read_data_block',
[c_archive_p, POINTER(c_void_p), POINTER(c_size_t), POINTER(c_longlong)],
c_int, check_int)
ffi('read_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('read_data_skip', [c_archive_p], c_int, check_int)
# archive_write
ffi('write_new', [], c_archive_p, check_null)
ffi('write_disk_new', [], c_archive_p, check_null)
ffi('write_disk_set_options', [c_archive_p, c_int], c_int, check_int)
WRITE_FORMATS = set((
'7zip', 'ar_bsd', 'ar_svr4', 'cpio', 'cpio_newc', 'gnutar', 'iso9660',
'mtree', 'mtree_classic', 'pax', 'pax_restricted', 'shar', 'shar_dump',
'ustar', 'v7tar', 'xar', 'zip'
))
for f_name in list(WRITE_FORMATS):
try:
ffi('write_set_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write format "%s" is not supported' % f_name)
WRITE_FORMATS.remove(f_name)
WRITE_FILTERS = set((
'b64encode', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'uuencode', 'xz'
))
for f_name in list(WRITE_FILTERS):
try:
ffi('write_add_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write filter "%s" is not supported' % f_name)
WRITE_FILTERS.remove(f_name)
ffi('write_open',
[c_archive_p, c_void_p, OPEN_CALLBACK, WRITE_CALLBACK, CLOSE_CALLBACK],
c_int, check_int)
ffi('write_open_fd', [c_archive_p, c_int], c_int, check_int)
ffi('write_open_filename', [c_archive_p, c_char_p], c_int, check_int)
ffi('write_open_filename_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('write_open_memory',
[c_archive_p, c_void_p, c_size_t, POINTER(c_size_t)],
c_int, check_int)
ffi('write_get_bytes_in_last_block', [c_archive_p], c_int, check_int)
ffi('write_get_bytes_per_block', [c_archive_p], c_int, check_int)
ffi('write_set_bytes_in_last_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_set_bytes_per_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_header', [c_archive_p, c_void_p], c_int, check_int)
ffi('write_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('write_data_block', [c_archive_p, c_void_p, c_size_t, c_longlong],
c_int, check_int)
ffi('write_finish_entry', [c_archive_p], c_int, check_int)
ffi('write_close', [c_archive_p], c_int, check_int)
ffi('write_free', [c_archive_p], c_int, check_int)
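if __name__ == '__main__':
    # Added smoke test (not part of the original module): check that the
    # bindings declared above resolve by creating and freeing a read handle.
    # Assumes a usable libarchive shared library is installed.
    archive_p = read_new()
    read_support_format_all(archive_p)
    read_support_filter_all(archive_p)
    read_free(archive_p)
    print('libarchive loaded from %s' % libarchive_path)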
|
gpl-3.0
| 9,008,883,253,015,466,000 | -5,024,565,844,388,216,000 | 30.114286 | 79 | 0.647908 | false |
vlachoudis/sl4a
|
python/src/Lib/ast.py
|
139
|
11347
|
# -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(expr, filename='<unknown>', mode='exec'):
"""
Parse an expression into an AST node.
Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST).
"""
return compile(expr, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
raise ValueError('malformed string')
return _convert(node_or_string)
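# Added usage sketch (not part of the stdlib source): literal_eval accepts
# only the literal structures listed above and rejects anything else.
#
#   >>> literal_eval("{'a': (1, 2.5), 'b': [True, None]}")
#   {'a': (1, 2.5), 'b': [True, None]}
#   >>> literal_eval("__import__('os')")
#   Traceback (most recent call last):
#       ...
#   ValueError: malformed string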
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
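# Added usage sketch (not part of the stdlib source): dump() on a one-line
# module produces output like:
#
#   >>> dump(parse('x = 1'))
#   "Module(body=[Assign(targets=[Name(id='x', ctx=Store())], value=Num(n=1))])"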
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
    # walk() yields *node* itself first, so the root is covered by the loop;
    # incrementing it before the loop would bump its line number twice.
    for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all child nodes of *node*, in no specified order. This is
useful if you only want to modify nodes in place and don't care about the
context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
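# Added usage sketch (not part of the stdlib source): walk() makes whole-tree
# queries one-liners, e.g. counting Name nodes in an expression.
#
#   >>> sum(isinstance(n, Name) for n in walk(parse('a + b')))
#   2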
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
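if __name__ == '__main__':
    # Added demonstration (not part of the stdlib module): exercise the
    # RewriteName transformer shown in the NodeTransformer docstring above.
    class RewriteName(NodeTransformer):
        def visit_Name(self, node):
            return copy_location(Subscript(
                value=Name(id='data', ctx=Load()),
                slice=Index(value=Str(s=node.id)),
                ctx=node.ctx
            ), node)
    tree = parse('foo + bar', mode='eval')
    tree = fix_missing_locations(RewriteName().visit(tree))
    code = compile(tree, '<ast>', 'eval')
    print(eval(code, {'data': {'foo': 40, 'bar': 2}}))  # prints 42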
|
apache-2.0
| 5,729,792,879,625,642,000 | 5,299,107,002,150,265,000 | 36.697674 | 81 | 0.61188 | false |
gtko/Sick-Beard
|
lib/hachoir_parser/image/xcf.py
|
90
|
10369
|
"""
Gimp image parser (XCF file, ".xcf" extension).
You can find information about the XCF file format in the Gimp source code. URL to read
CVS online:
http://cvs.gnome.org/viewcvs/gimp/app/xcf/
\--> files xcf-read.c and xcf-load.c
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (StaticFieldSet, FieldSet, ParserError,
UInt8, UInt32, Enum, Float32, String, PascalString32, RawBytes)
from lib.hachoir_parser.image.common import RGBA
from lib.hachoir_core.endian import NETWORK_ENDIAN
class XcfCompression(FieldSet):
static_size = 8
COMPRESSION_NAME = {
0: u"None",
1: u"RLE",
2: u"Zlib",
3: u"Fractal"
}
def createFields(self):
yield Enum(UInt8(self, "compression", "Compression method"), self.COMPRESSION_NAME)
class XcfResolution(StaticFieldSet):
format = (
(Float32, "xres", "X resolution in DPI"),
(Float32, "yres", "Y resolution in DPI")
)
class XcfTattoo(StaticFieldSet):
format = ((UInt32, "tattoo", "Tattoo"),)
class LayerOffsets(StaticFieldSet):
format = (
(UInt32, "ofst_x", "Offset X"),
(UInt32, "ofst_y", "Offset Y")
)
class LayerMode(FieldSet):
static_size = 32
MODE_NAME = {
0: u"Normal",
1: u"Dissolve",
2: u"Behind",
3: u"Multiply",
4: u"Screen",
5: u"Overlay",
6: u"Difference",
7: u"Addition",
8: u"Subtract",
9: u"Darken only",
10: u"Lighten only",
11: u"Hue",
12: u"Saturation",
13: u"Color",
14: u"Value",
15: u"Divide",
16: u"Dodge",
17: u"Burn",
18: u"Hard light",
19: u"Soft light",
20: u"Grain extract",
21: u"Grain merge",
22: u"Color erase"
}
def createFields(self):
yield Enum(UInt32(self, "mode", "Layer mode"), self.MODE_NAME)
class GimpBoolean(UInt32):
def __init__(self, parent, name):
UInt32.__init__(self, parent, name)
def createValue(self):
return 1 == UInt32.createValue(self)
class XcfUnit(StaticFieldSet):
format = ((UInt32, "unit", "Unit"),)
class XcfParasiteEntry(FieldSet):
def createFields(self):
yield PascalString32(self, "name", "Name", strip="\0", charset="UTF-8")
yield UInt32(self, "flags", "Flags")
yield PascalString32(self, "data", "Data", strip=" \0", charset="UTF-8")
class XcfLevel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "offset", "Offset")
offset = self["offset"].value
if offset == 0:
return
data_offsets = []
while (self.absolute_address + self.current_size)/8 < offset:
chunk = UInt32(self, "data_offset[]", "Data offset")
yield chunk
if chunk.value == 0:
break
data_offsets.append(chunk)
if (self.absolute_address + self.current_size)/8 != offset:
raise ParserError("Problem with level offset.")
previous = offset
for chunk in data_offsets:
data_offset = chunk.value
size = data_offset - previous
yield RawBytes(self, "data[]", size, "Data content of %s" % chunk.name)
previous = data_offset
class XcfHierarchy(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Width")
yield UInt32(self, "height", "Height")
yield UInt32(self, "bpp", "Bits/pixel")
offsets = []
while True:
chunk = UInt32(self, "offset[]", "Level offset")
yield chunk
if chunk.value == 0:
break
offsets.append(chunk.value)
for offset in offsets:
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfLevel(self, "level[]", "Level")
# yield XcfChannel(self, "channel[]", "Channel"))
class XcfChannel(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Channel width")
yield UInt32(self, "height", "Channel height")
yield PascalString32(self, "name", "Channel name", strip="\0", charset="UTF-8")
for field in readProperties(self):
yield field
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
def createDescription(self):
return 'Channel "%s"' % self["name"].value
class XcfLayer(FieldSet):
def createFields(self):
yield UInt32(self, "width", "Layer width in pixels")
yield UInt32(self, "height", "Layer height in pixels")
yield Enum(UInt32(self, "type", "Layer type"), XcfFile.IMAGE_TYPE_NAME)
yield PascalString32(self, "name", "Layer name", strip="\0", charset="UTF-8")
for prop in readProperties(self):
yield prop
# --
# TODO: Hack for Gimp 1.2 files
# --
yield UInt32(self, "hierarchy_ofs", "Hierarchy offset")
yield UInt32(self, "mask_ofs", "Layer mask offset")
padding = self.seekByte(self["hierarchy_ofs"].value, relative=False)
if padding is not None:
yield padding
yield XcfHierarchy(self, "hierarchy", "Hierarchy")
# TODO: Read layer mask if needed: self["mask_ofs"].value != 0
def createDescription(self):
return 'Layer "%s"' % self["name"].value
class XcfParasites(FieldSet):
def createFields(self):
size = self["../size"].value * 8
while self.current_size < size:
yield XcfParasiteEntry(self, "parasite[]", "Parasite")
class XcfProperty(FieldSet):
PROP_COMPRESSION = 17
PROP_RESOLUTION = 19
PROP_PARASITES = 21
TYPE_NAME = {
0: u"End",
1: u"Colormap",
2: u"Active layer",
3: u"Active channel",
4: u"Selection",
5: u"Floating selection",
6: u"Opacity",
7: u"Mode",
8: u"Visible",
9: u"Linked",
10: u"Lock alpha",
11: u"Apply mask",
12: u"Edit mask",
13: u"Show mask",
14: u"Show masked",
15: u"Offsets",
16: u"Color",
17: u"Compression",
18: u"Guides",
19: u"Resolution",
20: u"Tattoo",
21: u"Parasites",
22: u"Unit",
23: u"Paths",
24: u"User unit",
25: u"Vectors",
26: u"Text layer flags",
}
handler = {
6: RGBA,
7: LayerMode,
8: GimpBoolean,
9: GimpBoolean,
10: GimpBoolean,
11: GimpBoolean,
12: GimpBoolean,
13: GimpBoolean,
15: LayerOffsets,
17: XcfCompression,
19: XcfResolution,
20: XcfTattoo,
21: XcfParasites,
22: XcfUnit
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + self["size"].value) * 8
def createFields(self):
yield Enum(UInt32(self, "type", "Property type"), self.TYPE_NAME)
yield UInt32(self, "size", "Property size")
size = self["size"].value
if 0 < size:
cls = self.handler.get(self["type"].value, None)
if cls:
yield cls(self, "data", size=size*8)
else:
yield RawBytes(self, "data", size, "Data")
def createDescription(self):
return "Property: %s" % self["type"].display
def readProperties(parser):
while True:
prop = XcfProperty(parser, "property[]")
yield prop
if prop["type"].value == 0:
return
class XcfFile(Parser):
PARSER_TAGS = {
"id": "xcf",
"category": "image",
"file_ext": ("xcf",),
"mime": (u"image/x-xcf", u"application/x-gimp-image"),
"min_size": (26 + 8 + 4 + 4)*8, # header+empty property+layer offset+channel offset
"magic": (
('gimp xcf file\0', 0),
('gimp xcf v002\0', 0),
),
"description": "Gimp (XCF) picture"
}
endian = NETWORK_ENDIAN
IMAGE_TYPE_NAME = {
0: u"RGB",
1: u"Gray",
2: u"Indexed"
}
def validate(self):
if self.stream.readBytes(0, 14) not in ('gimp xcf file\0', 'gimp xcf v002\0'):
return "Wrong signature"
return True
def createFields(self):
# Read signature
yield String(self, "signature", 14, "Gimp picture signature (ends with nul byte)", charset="ASCII")
# Read image general informations (width, height, type)
yield UInt32(self, "width", "Image width")
yield UInt32(self, "height", "Image height")
yield Enum(UInt32(self, "type", "Image type"), self.IMAGE_TYPE_NAME)
for prop in readProperties(self):
yield prop
# Read layer offsets
layer_offsets = []
while True:
chunk = UInt32(self, "layer_offset[]", "Layer offset")
yield chunk
if chunk.value == 0:
break
layer_offsets.append(chunk.value)
# Read channel offsets
channel_offsets = []
while True:
chunk = UInt32(self, "channel_offset[]", "Channel offset")
yield chunk
if chunk.value == 0:
break
channel_offsets.append(chunk.value)
# Read layers
for index, offset in enumerate(layer_offsets):
if index+1 < len(layer_offsets):
size = (layer_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding:
yield padding
yield XcfLayer(self, "layer[]", size=size)
# Read channels
for index, offset in enumerate(channel_offsets):
if index+1 < len(channel_offsets):
size = (channel_offsets[index+1] - offset) * 8
else:
size = None
padding = self.seekByte(offset, relative=False)
if padding is not None:
yield padding
yield XcfChannel(self, "channel[]", "Channel", size=size)
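# Added usage sketch (not part of the original parser): hachoir parsers are
# fed an input stream and expose parsed fields by name. The file name is
# hypothetical and the vendored hachoir stream API is assumed.
#
#   from lib.hachoir_core.stream import FileInputStream
#   stream = FileInputStream(u"picture.xcf")
#   xcf = XcfFile(stream)
#   print xcf["width"].value, xcf["height"].value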
|
gpl-3.0
| -5,915,470,011,567,143,000 | -9,131,979,599,887,002,000 | 30.326284 | 108 | 0.550776 | false |
vladmm/intellij-community
|
python/helpers/python-skeletons/multiprocessing/__init__.py
|
40
|
4217
|
"""Skeleton for 'multiprocessing' stdlib module."""
from multiprocessing.pool import Pool
class Process(object):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self.name = ''
self.daemon = False
self.authkey = None
self.exitcode = None
self.ident = 0
self.pid = 0
self.sentinel = None
def run(self):
pass
def start(self):
pass
def terminate(self):
pass
def join(self, timeout=None):
pass
def is_alive(self):
return False
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class Connection(object):
def send(self, obj):
pass
def recv(self):
pass
def fileno(self):
return 0
def close(self):
pass
def poll(self, timeout=None):
pass
def send_bytes(self, buffer, offset=-1, size=-1):
pass
def recv_bytes(self, maxlength=-1):
pass
def recv_bytes_into(self, buffer, offset=-1):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def Pipe(duplex=True):
return Connection(), Connection()
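# Added usage sketch (not part of the skeleton; shows how the real
# 'multiprocessing' API mirrored here is typically used; 'worker' is a
# hypothetical function taking a Connection):
#
#   parent_conn, child_conn = Pipe()
#   p = Process(target=worker, args=(child_conn,))
#   p.start()
#   result = parent_conn.recv()
#   p.join()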
class Queue(object):
def __init__(self, maxsize=-1):
self._maxsize = maxsize
def qsize(self):
return 0
def empty(self):
return False
def full(self):
return False
def put(self, obj, block=True, timeout=None):
pass
def put_nowait(self, obj):
pass
def get(self, block=True, timeout=None):
pass
def get_nowait(self):
pass
def close(self):
pass
def join_thread(self):
pass
def cancel_join_thread(self):
pass
class SimpleQueue(object):
def empty(self):
return False
def get(self):
pass
def put(self, item):
pass
class JoinableQueue(Queue):
def task_done(self):
pass
def join(self):
pass
def active_children():
"""
:rtype: list[multiprocessing.Process]
"""
return []
def cpu_count():
return 0
def current_process():
"""
:rtype: multiprocessing.Process
"""
return Process()
def freeze_support():
pass
def get_all_start_methods():
return []
def get_context(method=None):
pass
def get_start_method(allow_none=False):
pass
def set_executable(path):
pass
def set_start_method(method):
pass
class Barrier(object):
def __init__(self, parties, action=None, timeout=None):
self.parties = parties
self.n_waiting = 0
self.broken = False
def wait(self, timeout=None):
pass
def reset(self):
pass
def abort(self):
pass
class Semaphore(object):
def __init__(self, value=1):
pass
def acquire(self, blocking=True, timeout=None):
pass
def release(self):
pass
class BoundedSemaphore(Semaphore):
pass
class Condition(object):
def __init__(self, lock=None):
pass
def acquire(self, *args):
pass
def release(self):
pass
def wait(self, timeout=None):
pass
def wait_for(self, predicate, timeout=None):
pass
def notify(self, n=1):
pass
def notify_all(self):
pass
class Event(object):
def is_set(self):
return False
def set(self):
pass
def clear(self):
pass
def wait(self, timeout=None):
pass
class Lock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
class RLock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def Value(typecode_or_type, *args, **kwargs):
pass
def Array(typecode_or_type, size_or_initializer, lock=True):
pass
def Manager():
return multiprocessing.SyncManager()
|
apache-2.0
| -1,859,354,273,453,262,600 | -7,476,599,752,285,902,000 | 14.060714 | 79 | 0.574342 | false |
dzz007/photivo
|
scons-local-2.2.0/SCons/Platform/darwin.py
|
14
|
2578
|
"""engine.SCons.Platform.darwin
Platform-specific initialization for Mac OS X systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/darwin.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import posix
import os
def generate(env):
posix.generate(env)
env['SHLIBSUFFIX'] = '.dylib'
# put macports paths at front to override Apple's versions, fink path is after
# For now let people who want Macports or Fink tools specify it!
# env['ENV']['PATH'] = '/opt/local/bin:/opt/local/sbin:' + env['ENV']['PATH'] + ':/sw/bin'
# Store extra system paths in env['ENV']['PATHOSX']
filelist = ['/etc/paths',]
# make sure this works on Macs with Tiger or earlier
try:
dirlist = os.listdir('/etc/paths.d')
except:
dirlist = []
for file in dirlist:
filelist.append('/etc/paths.d/'+file)
for file in filelist:
if os.path.isfile(file):
f = open(file, 'r')
lines = f.readlines()
for line in lines:
if line:
env.AppendENVPath('PATHOSX', line.strip('\n'))
f.close()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-3.0
| -3,652,912,550,429,921,300 | -5,211,900,865,779,853,000 | 35.828571 | 107 | 0.693561 | false |
gilessbrown/wextracto
|
wex/composed.py
|
3
|
4382
|
"""
Wextracto uses `Function composition <http://en.wikipedia.org/wiki/Function_composition_%28computer_science%29>`_
as an easy way to build new functions from existing ones:
.. code-block:: pycon
>>> from wex.composed import compose
>>> def add1(x):
... return x + 1
...
>>> def mult2(x):
... return x * 2
...
>>> f = compose(add1, mult2)
>>> f(2)
6
Wextracto uses the pipe operator, ``|``, as a shorthand for function composition.
This shorthand can be a powerful technique for reducing boilerplate code when
used in combination with :func:`.named` extractors:
.. code-block:: python
from wex.etree import css, text
from wex.extractor import named
attrs = named(title = css('h1') | text
description = css('#description') | text)
"""
from itertools import chain
from functools import WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES, partial as functools_partial
from six.moves import map as six_map
def compose(*functions):
""" Create a :class:`.ComposedCallable` from zero more functions. """
return ComposedCallable(*functions)
def composable(func):
""" Decorates a callable to support function composition using ``|``.
For example:
.. code-block:: python
@Composable.decorate
def add1(x):
return x + 1
def mult2(x):
return x * 2
composed = add1 | mult2
"""
return Composable.decorate(func)
class Composable(object):
@classmethod
def decorate(cls, func, **kw):
name = getattr(func, '__name__', str(func))
clsdict = dict(
__call__=staticmethod(func),
__doc__=getattr(func, '__doc__', None),
__name__=getattr(func, '__name__', None),
__module__=getattr(func, '__module__', None),
)
clsdict.update(kw)
return type(name, (cls,), clsdict)()
@classmethod
def __getattr__(cls, name):
return getattr(cls.__call__, name)
@classmethod
def __compose__(cls):
return (cls.__call__,)
def __or__(self, rhs):
assert hasattr(rhs, '__call__')
return compose(self, rhs)
def __ror__(self, lhs):
assert hasattr(lhs, '__call__')
return compose(lhs, self)
def __call__(self, arg):
raise NotImplementedError
def flatten_composed_callables(functions):
iterable = (getattr(f, 'functions', (f,)) for f in functions)
return tuple(chain.from_iterable(iterable))
class ComposedCallable(Composable):
""" A callable, taking one argument, composed from other callables.
.. code-block:: python
def mult2(x):
return x * 2
def add1(x):
return x + 1
composed = ComposedCallable(add1, mult2)
for x in (1, 2, 3):
assert composed(x) == mult2(add1(x))
ComposedCallable objects are :func:`composable <wex.composed.composable>`.
It can be composed of other ComposedCallable objects.
"""
def __init__(self, *functions):
self.functions = flatten_composed_callables(functions)
def __call__(self, arg, **kw):
res = arg
for func in self.functions:
res = func(res, **kw)
return res
def __compose__(self):
return self.functions
def __repr__(self):
return '<%s.%s%r>' % (self.__class__.__module__,
self.__class__.__name__,
self.functions)
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
defaults = {
'__annotations__': {}
}
def decorator(wrapper):
for attr in assigned:
class_ = getattr(wrapped, '__class__', None)
try:
value = getattr(wrapped, attr)
except AttributeError:
try:
value = getattr(class_, attr)
except AttributeError:
value = defaults[attr]
setattr(wrapper, attr, value)
for attr in updated:
value = getattr(wrapped, attr, {})
getattr(wrapper, attr).update(value)
return wrapper
return decorator
def partial(func, *args, **kwargs):
return composable(functools_partial(func, *args, **kwargs))
def map(func):
return partial(six_map, func)
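if __name__ == '__main__':
    # Added demonstration (not part of the original module): compose plain
    # callables with the operators defined above.
    add1 = composable(lambda x: x + 1)
    mult2 = composable(lambda x: x * 2)
    pipeline = add1 | mult2
    assert pipeline(2) == 6
    assert list((map(add1) | map(mult2))([1, 2, 3])) == [4, 6, 8]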
|
bsd-3-clause
| 4,911,864,240,561,613,000 | -5,005,801,208,357,634,000 | 25.083333 | 113 | 0.571657 | false |
NewpTone/stacklab-nova
|
debian/tmp/usr/lib/python2.7/dist-packages/nova/tests/test_quota.py
|
6
|
74369
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova import compute
from nova.compute import instance_types
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
from nova import flags
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import driver as scheduler_driver
from nova import test
import nova.tests.image.fake
from nova import volume
FLAGS = flags.FLAGS
class QuotaIntegrationTestCase(test.TestCase):
def setUp(self):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
quota_instances=2,
quota_cores=4,
quota_volumes=2,
quota_gigabytes=20,
quota_floating_ips=1,
network_manager='nova.network.manager.FlatDHCPManager')
# Apparently needed by the RPC tests...
self.network = self.start_service('network')
self.user_id = 'admin'
self.project_id = 'admin'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
orig_rpc_call = rpc.call
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry"""
if (topic == FLAGS.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
instance = scheduler().create_instance_db_entry(
context,
msg['args']['request_spec'],
None)
return [scheduler_driver.encode_instance(instance)]
else:
return orig_rpc_call(context, topic, msg)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
nova.tests.image.fake.stub_out_image_service(self.stubs)
def tearDown(self):
super(QuotaIntegrationTestCase, self).tearDown()
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
return db.instance_create(self.context, inst)
def _create_volume(self, size=10):
"""Create a test volume"""
vol = {}
vol['user_id'] = self.user_id
vol['project_id'] = self.project_id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
def test_too_many_instances(self):
instance_uuids = []
for i in range(FLAGS.quota_instances):
instance = self._create_instance()
instance_uuids.append(instance['uuid'])
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
for instance_uuid in instance_uuids:
db.instance_destroy(self.context, instance_uuid)
def test_too_many_cores(self):
instance = self._create_instance(cores=4)
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid)
db.instance_destroy(self.context, instance['uuid'])
def test_too_many_volumes(self):
volume_ids = []
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_gigabytes(self):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(exception.QuotaError,
volume.API().create,
self.context, 10, '', '', None)
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
self.context,
self.project_id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_auto_assigned(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
'project_id': self.project_id})
# auto allocated addresses should not be counted
self.assertRaises(exception.NoMoreFloatingIps,
self.network.allocate_floating_ip,
self.context,
self.project_id,
True)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
metadata = {}
for i in range(FLAGS.quota_metadata_items + 1):
metadata['key%s' % i] = 'value%s' % i
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(exception.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type=inst_type,
image_href=image_uuid,
metadata=metadata)
def _create_with_injected_files(self, files):
api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context, min_count=1, max_count=1,
instance_type=inst_type, image_href=image_uuid,
injected_files=files)
def test_no_injected_files(self):
api = compute.API()
inst_type = instance_types.get_instance_type_by_name('m1.small')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
api.create(self.context,
instance_type=inst_type,
image_href=image_uuid)
def test_max_injected_files(self):
files = []
for i in xrange(FLAGS.quota_injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_files(self):
files = []
for i in xrange(FLAGS.quota_injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = FLAGS.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
files = [('/test/path', content)]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_content_bytes(self):
max = FLAGS.quota_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max + 1)])
files = [('/test/path', content)]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = FLAGS.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max)])
files = [(path, 'config = quotatest')]
self._create_with_injected_files(files) # no QuotaError
def test_too_many_injected_file_path_bytes(self):
max = FLAGS.quota_injected_file_path_bytes
path = ''.join(['a' for i in xrange(max + 1)])
files = [(path, 'config = quotatest')]
self.assertRaises(exception.QuotaError,
self._create_with_injected_files, files)
def test_reservation_expire(self):
timeutils.set_time_override()
def assertInstancesReserved(reserved):
result = quota.QUOTAS.get_project_quotas(self.context,
self.context.project_id)
self.assertEqual(result['instances']['reserved'], reserved)
quota.QUOTAS.reserve(self.context,
expire=60,
instances=2)
assertInstancesReserved(2)
timeutils.advance_time_seconds(80)
result = quota.QUOTAS.expire(self.context)
assertInstancesReserved(0)
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
self.user_id = 'fake_user'
self.project_id = project_id
self.quota_class = quota_class
def elevated(self):
elevated = self.__class__(self.project_id, self.quota_class)
elevated.is_admin = True
return elevated
class FakeDriver(object):
def __init__(self, by_project=None, by_class=None, reservations=None):
self.called = []
self.by_project = by_project or {}
self.by_class = by_class or {}
self.reservations = reservations or []
def get_by_project(self, context, project_id, resource):
self.called.append(('get_by_project', context, project_id, resource))
try:
return self.by_project[project_id][resource]
except KeyError:
raise exception.ProjectQuotaNotFound(project_id=project_id)
def get_by_class(self, context, quota_class, resource):
self.called.append(('get_by_class', context, quota_class, resource))
try:
return self.by_class[quota_class][resource]
except KeyError:
raise exception.QuotaClassNotFound(class_name=quota_class)
def get_defaults(self, context, resources):
self.called.append(('get_defaults', context, resources))
return resources
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
self.called.append(('get_class_quotas', context, resources,
quota_class, defaults))
return resources
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True, usages=True):
self.called.append(('get_project_quotas', context, resources,
project_id, quota_class, defaults, usages))
return resources
def limit_check(self, context, resources, values):
self.called.append(('limit_check', context, resources, values))
def reserve(self, context, resources, deltas, expire=None):
self.called.append(('reserve', context, resources, deltas, expire))
return self.reservations
def commit(self, context, reservations):
self.called.append(('commit', context, reservations))
def rollback(self, context, reservations):
self.called.append(('rollback', context, reservations))
def destroy_all_by_project(self, context, project_id):
self.called.append(('destroy_all_by_project', context, project_id))
def expire(self, context):
self.called.append(('expire', context))
class BaseResourceTestCase(test.TestCase):
def test_no_flag(self):
resource = quota.BaseResource('test_resource')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, None)
self.assertEqual(resource.default, -1)
def test_with_flag(self):
# We know this flag exists, so use it...
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, 10)
def test_with_flag_no_quota(self):
self.flags(quota_instances=-1)
resource = quota.BaseResource('test_resource', 'quota_instances')
self.assertEqual(resource.name, 'test_resource')
self.assertEqual(resource.flag, 'quota_instances')
self.assertEqual(resource.default, -1)
def test_quota_no_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver()
context = FakeContext(None, None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 10)
def test_quota_with_project_no_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
))
context = FakeContext('test_project', None)
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_no_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext(None, 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 20)
def test_quota_with_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
),
by_class=dict(
test_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context)
self.assertEqual(quota_value, 15)
def test_quota_override_project_with_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=15),
override_project=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
project_id='override_project')
self.assertEqual(quota_value, 20)
def test_quota_with_project_override_class(self):
self.flags(quota_instances=10)
resource = quota.BaseResource('test_resource', 'quota_instances')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=15),
override_class=dict(test_resource=20),
))
context = FakeContext('test_project', 'test_class')
quota_value = resource.quota(driver, context,
quota_class='override_class')
self.assertEqual(quota_value, 20)
class QuotaEngineTestCase(test.TestCase):
def test_init(self):
quota_obj = quota.QuotaEngine()
self.assertEqual(quota_obj._resources, {})
self.assertTrue(isinstance(quota_obj._driver, quota.DbQuotaDriver))
def test_init_override_string(self):
quota_obj = quota.QuotaEngine(
quota_driver_class='nova.tests.test_quota.FakeDriver')
self.assertEqual(quota_obj._resources, {})
self.assertTrue(isinstance(quota_obj._driver, FakeDriver))
def test_init_override_obj(self):
quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
self.assertEqual(quota_obj._resources, {})
self.assertEqual(quota_obj._driver, FakeDriver)
def test_register_resource(self):
quota_obj = quota.QuotaEngine()
resource = quota.AbsoluteResource('test_resource')
quota_obj.register_resource(resource)
self.assertEqual(quota_obj._resources, dict(test_resource=resource))
def test_register_resources(self):
quota_obj = quota.QuotaEngine()
resources = [
quota.AbsoluteResource('test_resource1'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource3'),
]
quota_obj.register_resources(resources)
self.assertEqual(quota_obj._resources, dict(
test_resource1=resources[0],
test_resource2=resources[1],
test_resource3=resources[2],
))
def test_sync_predeclared(self):
quota_obj = quota.QuotaEngine()
def spam(*args, **kwargs):
pass
resource = quota.ReservableResource('test_resource', spam)
quota_obj.register_resource(resource)
self.assertEqual(resource.sync, spam)
def test_sync_multi(self):
quota_obj = quota.QuotaEngine()
def spam(*args, **kwargs):
pass
resources = [
quota.ReservableResource('test_resource1', spam),
quota.ReservableResource('test_resource2', spam),
quota.ReservableResource('test_resource3', spam),
quota.ReservableResource('test_resource4', spam),
]
quota_obj.register_resources(resources[:2])
self.assertEqual(resources[0].sync, spam)
self.assertEqual(resources[1].sync, spam)
self.assertEqual(resources[2].sync, spam)
self.assertEqual(resources[3].sync, spam)
def test_get_by_project(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_project=dict(
test_project=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_project(context, 'test_project',
'test_resource')
self.assertEqual(driver.called, [
('get_by_project', context, 'test_project', 'test_resource'),
])
self.assertEqual(result, 42)
def test_get_by_class(self):
context = FakeContext('test_project', 'test_class')
driver = FakeDriver(by_class=dict(
test_class=dict(test_resource=42)))
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
self.assertEqual(driver.called, [
('get_by_class', context, 'test_class', 'test_resource'),
])
self.assertEqual(result, 42)
def _make_quota_obj(self, driver):
quota_obj = quota.QuotaEngine(quota_driver_class=driver)
resources = [
quota.AbsoluteResource('test_resource4'),
quota.AbsoluteResource('test_resource3'),
quota.AbsoluteResource('test_resource2'),
quota.AbsoluteResource('test_resource1'),
]
quota_obj.register_resources(resources)
return quota_obj
def test_get_defaults(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result = quota_obj.get_defaults(context)
self.assertEqual(driver.called, [
('get_defaults', context, quota_obj._resources),
])
self.assertEqual(result, quota_obj._resources)
def test_get_class_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_class_quotas(context, 'test_class')
result2 = quota_obj.get_class_quotas(context, 'test_class', False)
self.assertEqual(driver.called, [
('get_class_quotas', context, quota_obj._resources,
'test_class', True),
('get_class_quotas', context, quota_obj._resources,
'test_class', False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_get_project_quotas(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.get_project_quotas(context, 'test_project')
result2 = quota_obj.get_project_quotas(context, 'test_project',
quota_class='test_class',
defaults=False,
usages=False)
self.assertEqual(driver.called, [
('get_project_quotas', context, quota_obj._resources,
'test_project', None, True, True),
('get_project_quotas', context, quota_obj._resources,
'test_project', 'test_class', False, False),
])
self.assertEqual(result1, quota_obj._resources)
self.assertEqual(result2, quota_obj._resources)
def test_count_no_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource5',
True, foo='bar')
def test_count_wrong_resource(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
self.assertRaises(exception.QuotaResourceUnknown,
quota_obj.count, context, 'test_resource1',
True, foo='bar')
def test_count(self):
def fake_count(context, *args, **kwargs):
self.assertEqual(args, (True,))
self.assertEqual(kwargs, dict(foo='bar'))
return 5
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.register_resource(quota.CountableResource('test_resource5',
fake_count))
result = quota_obj.count(context, 'test_resource5', True, foo='bar')
self.assertEqual(result, 5)
def test_limit_check(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
test_resource3=2, test_resource4=1)
self.assertEqual(driver.called, [
('limit_check', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
)),
])
def test_reserve(self):
context = FakeContext(None, None)
driver = FakeDriver(reservations=[
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
quota_obj = self._make_quota_obj(driver)
result1 = quota_obj.reserve(context, test_resource1=4,
test_resource2=3, test_resource3=2,
test_resource4=1)
result2 = quota_obj.reserve(context, expire=3600,
test_resource1=1, test_resource2=2,
test_resource3=3, test_resource4=4)
self.assertEqual(driver.called, [
('reserve', context, quota_obj._resources, dict(
test_resource1=4,
test_resource2=3,
test_resource3=2,
test_resource4=1,
), None),
('reserve', context, quota_obj._resources, dict(
test_resource1=1,
test_resource2=2,
test_resource3=3,
test_resource4=4,
), 3600),
])
self.assertEqual(result1, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
self.assertEqual(result2, [
'resv-01', 'resv-02', 'resv-03', 'resv-04',
])
def test_commit(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('commit', context, ['resv-01', 'resv-02', 'resv-03']),
])
def test_rollback(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
self.assertEqual(driver.called, [
('rollback', context, ['resv-01', 'resv-02', 'resv-03']),
])
def test_destroy_all_by_project(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.destroy_all_by_project(context, 'test_project')
self.assertEqual(driver.called, [
('destroy_all_by_project', context, 'test_project'),
])
def test_expire(self):
context = FakeContext(None, None)
driver = FakeDriver()
quota_obj = self._make_quota_obj(driver)
quota_obj.expire(context)
self.assertEqual(driver.called, [
('expire', context),
])
def test_resources(self):
quota_obj = self._make_quota_obj(None)
self.assertEqual(quota_obj.resources,
['test_resource1', 'test_resource2',
'test_resource3', 'test_resource4'])
class DbQuotaDriverTestCase(test.TestCase):
def setUp(self):
super(DbQuotaDriverTestCase, self).setUp()
self.flags(quota_instances=10,
quota_cores=20,
quota_ram=50 * 1024,
quota_volumes=10,
quota_gigabytes=1000,
quota_floating_ips=10,
quota_metadata_items=128,
quota_injected_files=5,
quota_injected_file_content_bytes=10 * 1024,
quota_injected_file_path_bytes=255,
quota_security_groups=10,
quota_security_group_rules=20,
reservation_expire=86400,
until_refresh=0,
max_age=0,
)
self.driver = quota.DbQuotaDriver()
self.calls = []
timeutils.set_time_override()
def tearDown(self):
timeutils.clear_time_override()
super(DbQuotaDriverTestCase, self).tearDown()
def test_get_defaults(self):
# Use our pre-defined resources
result = self.driver.get_defaults(None, quota.QUOTAS._resources)
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
volumes=10,
gigabytes=1000,
floating_ips=10,
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def _stub_quota_class_get_all_by_name(self):
# Stub out quota_class_get_all_by_name
def fake_qcgabn(context, quota_class):
self.calls.append('quota_class_get_all_by_name')
self.assertEqual(quota_class, 'test_class')
return dict(
instances=5,
ram=25 * 1024,
gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
)
self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
def test_get_class_quotas(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class')
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
cores=20,
ram=25 * 1024,
volumes=10,
gigabytes=500,
floating_ips=10,
metadata_items=64,
injected_files=5,
injected_file_content_bytes=5 * 1024,
injected_file_path_bytes=255,
security_groups=10,
security_group_rules=20,
key_pairs=100,
))
def test_get_class_quotas_no_defaults(self):
self._stub_quota_class_get_all_by_name()
result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
'test_class', False)
self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
self.assertEqual(result, dict(
instances=5,
ram=25 * 1024,
gigabytes=500,
metadata_items=64,
injected_file_content_bytes=5 * 1024,
))
def _stub_get_by_project(self):
def fake_qgabp(context, project_id):
self.calls.append('quota_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
cores=10,
gigabytes=50,
injected_files=2,
injected_file_path_bytes=127,
)
def fake_qugabp(context, project_id):
self.calls.append('quota_usage_get_all_by_project')
self.assertEqual(project_id, 'test_project')
return dict(
instances=dict(in_use=2, reserved=2),
cores=dict(in_use=4, reserved=4),
ram=dict(in_use=10 * 1024, reserved=0),
volumes=dict(in_use=2, reserved=0),
gigabytes=dict(in_use=10, reserved=0),
floating_ips=dict(in_use=2, reserved=0),
metadata_items=dict(in_use=0, reserved=0),
injected_files=dict(in_use=0, reserved=0),
injected_file_content_bytes=dict(in_use=0, reserved=0),
injected_file_path_bytes=dict(in_use=0, reserved=0),
)
self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
self._stub_quota_class_get_all_by_name()
def test_get_project_quotas(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_no_class(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
])
self.assertEqual(result, dict(
instances=dict(
limit=10,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=50 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=128,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=10 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_alt_context_with_class(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('other_project', 'other_class'),
quota.QUOTAS._resources, 'test_project', quota_class='test_class')
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
in_use=2,
reserved=2,
),
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
ram=dict(
limit=25 * 1024,
in_use=10 * 1024,
reserved=0,
),
volumes=dict(
limit=10,
in_use=2,
reserved=0,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
floating_ips=dict(
limit=10,
in_use=2,
reserved=0,
),
metadata_items=dict(
limit=64,
in_use=0,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
security_groups=dict(
limit=10,
in_use=0,
reserved=0,
),
security_group_rules=dict(
limit=20,
in_use=0,
reserved=0,
),
key_pairs=dict(
limit=100,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_defaults(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', defaults=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_usage_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
cores=dict(
limit=10,
in_use=4,
reserved=4,
),
gigabytes=dict(
limit=50,
in_use=10,
reserved=0,
),
injected_files=dict(
limit=2,
in_use=0,
reserved=0,
),
injected_file_path_bytes=dict(
limit=127,
in_use=0,
reserved=0,
),
))
def test_get_project_quotas_no_usages(self):
self._stub_get_by_project()
result = self.driver.get_project_quotas(
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources, 'test_project', usages=False)
self.assertEqual(self.calls, [
'quota_get_all_by_project',
'quota_class_get_all_by_name',
])
self.assertEqual(result, dict(
instances=dict(
limit=5,
),
cores=dict(
limit=10,
),
ram=dict(
limit=25 * 1024,
),
volumes=dict(
limit=10,
),
gigabytes=dict(
limit=50,
),
floating_ips=dict(
limit=10,
),
metadata_items=dict(
limit=64,
),
injected_files=dict(
limit=2,
),
injected_file_content_bytes=dict(
limit=5 * 1024,
),
injected_file_path_bytes=dict(
limit=127,
),
security_groups=dict(
limit=10,
),
security_group_rules=dict(
limit=20,
),
key_pairs=dict(
limit=100,
),
))
def _stub_get_project_quotas(self):
def fake_get_project_quotas(context, resources, project_id,
quota_class=None, defaults=True,
usages=True):
self.calls.append('get_project_quotas')
return dict((k, dict(limit=v.default))
for k, v in resources.items())
self.stubs.Set(self.driver, 'get_project_quotas',
fake_get_project_quotas)
def test_get_quotas_has_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_unknown(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['unknown'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync_no_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['metadata_items'], True)
self.assertEqual(self.calls, [])
def test_get_quotas_no_sync_has_sync_resource(self):
self._stub_get_project_quotas()
self.assertRaises(exception.QuotaResourceUnknown,
self.driver._get_quotas,
None, quota.QUOTAS._resources,
['instances'], False)
self.assertEqual(self.calls, [])
def test_get_quotas_has_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['instances', 'cores', 'ram',
'volumes', 'gigabytes',
'floating_ips', 'security_groups'],
True)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
instances=10,
cores=20,
ram=50 * 1024,
volumes=10,
gigabytes=1000,
floating_ips=10,
security_groups=10,
))
def test_get_quotas_no_sync(self):
self._stub_get_project_quotas()
result = self.driver._get_quotas(FakeContext('test_project',
'test_class'),
quota.QUOTAS._resources,
['metadata_items', 'injected_files',
'injected_file_content_bytes',
'injected_file_path_bytes',
'security_group_rules'], False)
self.assertEqual(self.calls, ['get_project_quotas'])
self.assertEqual(result, dict(
metadata_items=128,
injected_files=5,
injected_file_content_bytes=10 * 1024,
injected_file_path_bytes=255,
security_group_rules=20,
))
def test_limit_check_under(self):
self._stub_get_project_quotas()
self.assertRaises(exception.InvalidQuotaValue,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=-1))
def test_limit_check_over(self):
self._stub_get_project_quotas()
self.assertRaises(exception.OverQuota,
self.driver.limit_check,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=129))
def test_limit_check_unlimited(self):
self.flags(quota_metadata_items=-1)
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=32767))
def test_limit_check(self):
self._stub_get_project_quotas()
self.driver.limit_check(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(metadata_items=128))
def _stub_quota_reserve(self):
def fake_quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age):
self.calls.append(('quota_reserve', expire, until_refresh,
max_age))
return ['resv-1', 'resv-2', 'resv-3']
self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
def test_reserve_bad_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.assertRaises(exception.InvalidReservationExpiration,
self.driver.reserve,
FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire='invalid')
self.assertEqual(self.calls, [])
def test_reserve_default_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2))
expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_int_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=3600)
expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_timedelta_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire_delta = datetime.timedelta(seconds=60)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire_delta)
expire = timeutils.utcnow() + expire_delta
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_datetime_expire(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_until_refresh(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(until_refresh=500)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 500, 0),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
def test_reserve_max_age(self):
self._stub_get_project_quotas()
self._stub_quota_reserve()
self.flags(max_age=86400)
expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
result = self.driver.reserve(FakeContext('test_project', 'test_class'),
quota.QUOTAS._resources,
dict(instances=2), expire=expire)
self.assertEqual(self.calls, [
'get_project_quotas',
('quota_reserve', expire, 0, 86400),
])
self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
class FakeSession(object):
def begin(self):
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
return False
class FakeUsage(sqa_models.QuotaUsage):
def save(self, *args, **kwargs):
pass
class QuotaReserveSqlAlchemyTestCase(test.TestCase):
# nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
# own test case, and since it's a quota manipulator, this is the
# best place to put it...
def setUp(self):
super(QuotaReserveSqlAlchemyTestCase, self).setUp()
self.sync_called = set()
def make_sync(res_name):
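            # Each fake sync records that it ran and reports a "refreshed"
            # in_use value: 2 when the stored in_use is negative, otherwise
            # one less than the stored value (and 0 for unknown resources).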
def sync(context, project_id, session):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
return {res_name: 2}
else:
return {res_name: self.usages[res_name].in_use - 1}
return {res_name: 0}
return sync
self.resources = {}
for res_name in ('instances', 'cores', 'ram'):
res = quota.ReservableResource(res_name, make_sync(res_name))
self.resources[res_name] = res
self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
self.usages = {}
self.usages_created = {}
self.reservations_created = {}
def fake_get_session():
return FakeSession()
def fake_get_quota_usages(context, session):
return self.usages.copy()
def fake_quota_usage_create(context, project_id, resource, in_use,
reserved, until_refresh, session=None,
save=True):
quota_usage_ref = self._make_quota_usage(
project_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
self.usages_created[resource] = quota_usage_ref
return quota_usage_ref
def fake_reservation_create(context, uuid, usage_id, project_id,
resource, delta, expire, session=None):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
self.reservations_created[resource] = reservation_ref
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
self.stubs.Set(sqa_api, '_get_quota_usages', fake_get_quota_usages)
self.stubs.Set(sqa_api, 'quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, 'reservation_create', fake_reservation_create)
timeutils.set_time_override()
def _make_quota_usage(self, project_id, resource, in_use, reserved,
until_refresh, created_at, updated_at):
quota_usage_ref = FakeUsage()
quota_usage_ref.id = len(self.usages) + len(self.usages_created)
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.created_at = created_at
quota_usage_ref.updated_at = updated_at
quota_usage_ref.deleted_at = None
quota_usage_ref.deleted = False
return quota_usage_ref
def init_usage(self, project_id, resource, in_use, reserved,
until_refresh=None, created_at=None, updated_at=None):
if created_at is None:
created_at = timeutils.utcnow()
if updated_at is None:
updated_at = timeutils.utcnow()
quota_usage_ref = self._make_quota_usage(project_id, resource, in_use,
reserved, until_refresh,
created_at, updated_at)
self.usages[resource] = quota_usage_ref
def compare_usage(self, usage_dict, expected):
for usage in expected:
resource = usage['resource']
for key, value in usage.items():
actual = getattr(usage_dict[resource], key)
self.assertEqual(actual, value,
"%s != %s on usage for resource %s" %
(actual, value, resource))
def _make_reservation(self, uuid, usage_id, project_id, resource,
delta, expire, created_at, updated_at):
reservation_ref = sqa_models.Reservation()
reservation_ref.id = len(self.reservations_created)
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage_id
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.created_at = created_at
reservation_ref.updated_at = updated_at
reservation_ref.deleted_at = None
reservation_ref.deleted = False
return reservation_ref
def compare_reservation(self, reservations, expected):
reservations = set(reservations)
for resv in expected:
resource = resv['resource']
resv_obj = self.reservations_created[resource]
self.assertIn(resv_obj.uuid, reservations)
reservations.discard(resv_obj.uuid)
for key, value in resv.items():
actual = getattr(resv_obj, key)
self.assertEqual(actual, value,
"%s != %s on reservation for resource %s" %
(actual, value, resource))
self.assertEqual(len(reservations), 0)
def test_quota_reserve_create_usages(self):
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages_created, [
dict(resource='instances',
project_id='test_project',
in_use=0,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=0,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=0,
reserved=2 * 1024,
until_refresh=None),
])
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages_created['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages_created['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages_created['ram'],
delta=2 * 1024),
])
def test_quota_reserve_negative_in_use(self):
self.init_usage('test_project', 'instances', -1, 0, until_refresh=1)
self.init_usage('test_project', 'cores', -1, 0, until_refresh=1)
self.init_usage('test_project', 'ram', -1, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_until_refresh(self):
self.init_usage('test_project', 'instances', 3, 0, until_refresh=1)
self.init_usage('test_project', 'cores', 3, 0, until_refresh=1)
self.init_usage('test_project', 'ram', 3, 0, until_refresh=1)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=5),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=5),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=5),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_max_age(self):
max_age = 3600
record_created = (timeutils.utcnow() -
datetime.timedelta(seconds=max_age))
self.init_usage('test_project', 'instances', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'cores', 3, 0,
created_at=record_created, updated_at=record_created)
self.init_usage('test_project', 'ram', 3, 0,
created_at=record_created, updated_at=record_created)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, max_age)
self.assertEqual(self.sync_called, set(['instances', 'cores', 'ram']))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=2,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=2,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=2,
reserved=2 * 1024,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_no_refresh(self):
self.init_usage('test_project', 'instances', 3, 0)
self.init_usage('test_project', 'cores', 3, 0)
self.init_usage('test_project', 'ram', 3, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=3,
reserved=2,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=3,
reserved=4,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=3,
reserved=2 * 1024,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=2 * 1024),
])
def test_quota_reserve_unders(self):
self.init_usage('test_project', 'instances', 1, 0)
self.init_usage('test_project', 'cores', 3, 0)
self.init_usage('test_project', 'ram', 1 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=-2,
cores=-4,
ram=-2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=1,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=3,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=1 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
delta=-2 * 1024),
])
def test_quota_reserve_overs(self):
self.init_usage('test_project', 'instances', 4, 0)
self.init_usage('test_project', 'cores', 8, 0)
self.init_usage('test_project', 'ram', 10 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=2,
cores=4,
ram=2 * 1024,
)
self.assertRaises(exception.OverQuota,
sqa_api.quota_reserve,
context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=4,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=8,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=10 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.assertEqual(self.reservations_created, {})
def test_quota_reserve_reduction(self):
self.init_usage('test_project', 'instances', 10, 0)
self.init_usage('test_project', 'cores', 20, 0)
self.init_usage('test_project', 'ram', 20 * 1024, 0)
context = FakeContext('test_project', 'test_class')
quotas = dict(
instances=5,
cores=10,
ram=10 * 1024,
)
deltas = dict(
instances=-2,
cores=-4,
ram=-2 * 1024,
)
result = sqa_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
self.assertEqual(self.sync_called, set([]))
self.compare_usage(self.usages, [
dict(resource='instances',
project_id='test_project',
in_use=10,
reserved=0,
until_refresh=None),
dict(resource='cores',
project_id='test_project',
in_use=20,
reserved=0,
until_refresh=None),
dict(resource='ram',
project_id='test_project',
in_use=20 * 1024,
reserved=0,
until_refresh=None),
])
self.assertEqual(self.usages_created, {})
self.compare_reservation(result, [
dict(resource='instances',
usage_id=self.usages['instances'],
project_id='test_project',
delta=-2),
dict(resource='cores',
usage_id=self.usages['cores'],
project_id='test_project',
delta=-4),
dict(resource='ram',
usage_id=self.usages['ram'],
project_id='test_project',
delta=-2 * 1024),
])
|
apache-2.0
| 5,571,268,208,118,497,000 | -2,615,909,754,647,632,400 | 37.295057 | 79 | 0.495112 | false |
AustereCuriosity/astropy
|
astropy/io/fits/verify.py
|
4
|
5728
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import unicode_literals
import operator
import warnings
from ...extern.six import next
from ...utils import indent
from ...utils.exceptions import AstropyUserWarning
class VerifyError(Exception):
"""
Verify exception class.
"""
class VerifyWarning(AstropyUserWarning):
"""
Verify warning class.
"""
VERIFY_OPTIONS = ['ignore', 'warn', 'exception', 'fix', 'silentfix',
'fix+ignore', 'fix+warn', 'fix+exception',
'silentfix+ignore', 'silentfix+warn', 'silentfix+exception']
class _Verify(object):
"""
Shared methods for verification.
"""
def run_option(self, option='warn', err_text='', fix_text='Fixed.',
fix=None, fixable=True):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ['warn', 'exception']:
fixable = False
# fix the value
elif not fixable:
text = 'Unfixable error: {}'.format(text)
else:
if fix:
fix()
text += ' ' + fix_text
return (fixable, text)
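    # Typical call pattern from a subclass's ``_verify`` (illustrative; the
    # error text and fix closure here are hypothetical):
    #
    #     fixable, text = self.run_option(
    #         option, err_text='Illegal keyword value.',
    #         fix=lambda: None, fixable=True)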
def verify(self, option='warn'):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError('Option {!r} not recognized.'.format(option))
if opt == 'ignore':
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if '+' in opt:
fix_opt, report_opt = opt.split('+')
elif opt in ['fix', 'silentfix']:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, 'exception'
else:
fix_opt, report_opt = None, opt
if fix_opt == 'silentfix' and report_opt == 'ignore':
# Fixable errors were fixed, but don't report anything
return
if fix_opt == 'silentfix':
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == 'fix' and report_opt == 'ignore':
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, 'Verification reported errors:')
messages.append('Note: astropy.io.fits uses zero-based indexing.\n')
if fix_opt == 'silentfix' and not unfixable:
return
elif report_opt == 'warn' or (fix_opt == 'fix' and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError('\n' + '\n'.join(messages))
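# Illustrative use of the option strings documented above on any _Verify
# subclass (``hdulist`` stands in for a hypothetical verifiable object):
#
#     >>> hdulist.verify('fix+warn')             # fix, and warn about fixes
#     >>> hdulist.verify('silentfix+exception')  # fix quietly, raise otherwise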
class _ErrList(list):
"""
Verification errors list class. It has a nested list structure
constructed by error messages generated by verifications at
different class levels.
"""
def __new__(cls, val=None, unit='Element'):
return super(cls, cls).__new__(cls, val)
def __init__(self, val=None, unit='Element'):
self.unit = unit
def __str__(self):
return '\n'.join(item[1] for item in self.iter_lines())
def iter_lines(self, filter=None, shift=0):
"""
Iterate the nested structure as a list of strings with appropriate
indentations for each level of structure.
"""
element = 0
        # Go through the list twice: the first time, print out all
        # top-level messages.
for item in self:
if not isinstance(item, _ErrList):
if filter is None or filter(item):
yield item[0], indent(item[1], shift=shift)
        # The second time, go through the next-level items; each next level
        # must be present, even if it has nothing to report.
for item in self:
if isinstance(item, _ErrList):
next_lines = item.iter_lines(filter=filter, shift=shift + 1)
try:
first_line = next(next_lines)
except StopIteration:
first_line = None
if first_line is not None:
if self.unit:
# This line is sort of a header for the next level in
# the hierarchy
yield None, indent('{} {}:'.format(self.unit, element),
shift=shift)
yield first_line
for line in next_lines:
yield line
element += 1
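# Illustrative nesting (hypothetical messages): a per-card error list wrapped
# in a per-HDU list; ``__str__`` renders the hierarchy through iter_lines():
#
#     >>> errs = _ErrList([_ErrList([(True, 'Fixed card.')], unit='Card')],
#     ...                 unit='HDU')
#     >>> print(errs)          # renders roughly:
#     HDU 0:
#       Fixed card.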
|
bsd-3-clause
| -211,786,760,437,903,970 | 7,256,470,285,897,362,000 | 31.731429 | 80 | 0.539979 | false |
robbiet480/home-assistant
|
tests/auth/test_init.py
|
13
|
32299
|
"""Tests for the Home Assistant auth module."""
from datetime import timedelta
import jwt
import pytest
import voluptuous as vol
from homeassistant import auth, data_entry_flow
from homeassistant.auth import auth_store, const as auth_const, models as auth_models
from homeassistant.auth.const import MFA_SESSION_EXPIRATION
from homeassistant.core import callback
from homeassistant.util import dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import CLIENT_ID, MockUser, ensure_auth_manager_loaded, flush_store
@pytest.fixture
def mock_hass(loop):
"""Home Assistant mock with minimum amount of data set to make it work with auth."""
hass = Mock()
hass.config.skip_pip = True
return hass
async def test_auth_manager_from_config_validates_config(mock_hass):
"""Test get auth providers."""
with pytest.raises(vol.Invalid):
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Invalid configuration because no users",
"type": "insecure_example",
"id": "invalid_config",
},
],
[],
)
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[],
)
providers = [
{"name": provider.name, "id": provider.id, "type": provider.type}
for provider in manager.auth_providers
]
assert providers == [
{"name": "Test Name", "type": "insecure_example", "id": None},
{"name": "Test Name 2", "type": "insecure_example", "id": "another"},
]
async def test_auth_manager_from_config_auth_modules(mock_hass):
"""Test get auth modules."""
with pytest.raises(vol.Invalid):
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[
{"name": "Module 1", "type": "insecure_example", "data": []},
{
"name": "Invalid configuration because no data",
"type": "insecure_example",
"id": "another",
},
],
)
manager = await auth.auth_manager_from_config(
mock_hass,
[
{"name": "Test Name", "type": "insecure_example", "users": []},
{
"name": "Test Name 2",
"type": "insecure_example",
"id": "another",
"users": [],
},
],
[
{"name": "Module 1", "type": "insecure_example", "data": []},
{
"name": "Module 2",
"type": "insecure_example",
"id": "another",
"data": [],
},
],
)
providers = [
{"name": provider.name, "type": provider.type, "id": provider.id}
for provider in manager.auth_providers
]
assert providers == [
{"name": "Test Name", "type": "insecure_example", "id": None},
{"name": "Test Name 2", "type": "insecure_example", "id": "another"},
]
modules = [
{"name": module.name, "type": module.type, "id": module.id}
for module in manager.auth_mfa_modules
]
assert modules == [
{"name": "Module 1", "type": "insecure_example", "id": "insecure_example"},
{"name": "Module 2", "type": "insecure_example", "id": "another"},
]
async def test_create_new_user(hass):
"""Test creating new user."""
events = []
@callback
def user_added(event):
events.append(event)
hass.bus.async_listen("user_added", user_added)
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.is_owner is False
assert user.name == "Test Name"
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_login_as_existing_user(mock_hass):
"""Test login as existing user."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add a fake user that we're not going to log in with
user = MockUser(
id="mock-user2", is_owner=False, is_active=False, name="Not user"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id2",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "other-user"},
is_new=False,
)
)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_linking_user_to_two_auth_providers(hass, hass_storage):
"""Test linking user to two auth providers."""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
},
{
"type": "insecure_example",
"id": "another-provider",
"users": [{"username": "another-user", "password": "another-password"}],
},
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
assert user is not None
step = await manager.login_flow.async_init(
("insecure_example", "another-provider"), context={"credential_only": True}
)
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "another-user", "password": "another-password"}
)
new_credential = step["result"]
await manager.async_link_user(user, new_credential)
assert len(user.credentials) == 2
async def test_saving_loading(hass, hass_storage):
"""Test storing and saving data.
Creates one of each type that we store to test we restore correctly.
"""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
await manager.async_activate_user(user)
    # the first refresh token will be used to create an access token
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
manager.async_create_access_token(refresh_token, "192.168.0.1")
# the second refresh token will not be used
await manager.async_create_refresh_token(user, "dummy-client")
await flush_store(manager._store._store)
store2 = auth_store.AuthStore(hass)
users = await store2.async_get_users()
assert len(users) == 1
assert users[0].permissions == user.permissions
assert users[0] == user
assert len(users[0].refresh_tokens) == 2
for r_token in users[0].refresh_tokens.values():
if r_token.client_id == CLIENT_ID:
# verify the first refresh token
assert r_token.last_used_at is not None
assert r_token.last_used_ip == "192.168.0.1"
elif r_token.client_id == "dummy-client":
# verify the second refresh token
assert r_token.last_used_at is None
assert r_token.last_used_ip is None
else:
assert False, "Unknown client_id: %s" % r_token.client_id
async def test_cannot_retrieve_expired_access_token(hass):
"""Test that we cannot retrieve expired access tokens."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.user.id is user.id
assert refresh_token.client_id == CLIENT_ID
access_token = manager.async_create_access_token(refresh_token)
assert await manager.async_validate_access_token(access_token) is refresh_token
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow()
- auth_const.ACCESS_TOKEN_EXPIRATION
- timedelta(seconds=11),
):
access_token = manager.async_create_access_token(refresh_token)
assert await manager.async_validate_access_token(access_token) is None
async def test_generating_system_user(hass):
"""Test that we can add a system user."""
events = []
@callback
def user_added(event):
events.append(event)
hass.bus.async_listen("user_added", user_added)
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user("Hass.io")
token = await manager.async_create_refresh_token(user)
assert user.system_generated
assert token is not None
assert token.client_id is None
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_refresh_token_requires_client_for_user(hass):
"""Test create refresh token for a user with client_id."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
assert user.system_generated is False
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user)
token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.token_type == auth_models.TOKEN_TYPE_NORMAL
# default access token expiration
assert token.access_token_expiration == auth_const.ACCESS_TOKEN_EXPIRATION
async def test_refresh_token_not_requires_client_for_system_user(hass):
"""Test create refresh token for a system user w/o client_id."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user("Hass.io")
assert user.system_generated is True
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user, CLIENT_ID)
token = await manager.async_create_refresh_token(user)
assert token is not None
assert token.client_id is None
assert token.token_type == auth_models.TOKEN_TYPE_SYSTEM
async def test_refresh_token_with_specific_access_token_expiration(hass):
"""Test create a refresh token with specific access token expiration."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
token = await manager.async_create_refresh_token(
user, CLIENT_ID, access_token_expiration=timedelta(days=100)
)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.access_token_expiration == timedelta(days=100)
async def test_refresh_token_type(hass):
"""Test create a refresh token with token type."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user, CLIENT_ID, token_type=auth_models.TOKEN_TYPE_SYSTEM
)
token = await manager.async_create_refresh_token(
user, CLIENT_ID, token_type=auth_models.TOKEN_TYPE_NORMAL
)
assert token is not None
assert token.client_id == CLIENT_ID
assert token.token_type == auth_models.TOKEN_TYPE_NORMAL
async def test_refresh_token_type_long_lived_access_token(hass):
"""Test create a refresh token has long-lived access token type."""
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user, token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
)
token = await manager.async_create_refresh_token(
user,
client_name="GPS LOGGER",
client_icon="mdi:home",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
)
assert token is not None
assert token.client_id is None
assert token.client_name == "GPS LOGGER"
assert token.client_icon == "mdi:home"
assert token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
async def test_cannot_deactivate_owner(mock_hass):
"""Test that we cannot deactivate the owner."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
owner = MockUser(is_owner=True).add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_deactivate_user(owner)
async def test_remove_refresh_token(mock_hass):
"""Test that we can remove a refresh token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
access_token = manager.async_create_access_token(refresh_token)
await manager.async_remove_refresh_token(refresh_token)
assert await manager.async_get_refresh_token(refresh_token.id) is None
assert await manager.async_validate_access_token(access_token) is None
async def test_create_access_token(mock_hass):
"""Test normal refresh_token's jwt_key keep same after used."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_NORMAL
jwt_key = refresh_token.jwt_key
access_token = manager.async_create_access_token(refresh_token)
assert access_token is not None
assert refresh_token.jwt_key == jwt_key
    jwt_payload = jwt.decode(access_token, jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(minutes=30).total_seconds()
)
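# A minimal sketch (not part of the original tests) of the JWT claim layout
# the assertions above rely on, assuming only PyJWT's public encode/decode
# API. ``_demo_jwt_claims`` is a hypothetical helper, not Home Assistant code.
def _demo_jwt_claims():
    issued = dt_util.utcnow()
    payload = {
        "iss": "some-refresh-token-id",  # the refresh token id goes in "iss"
        "iat": issued,
        "exp": issued + timedelta(minutes=30),
    }
    token = jwt.encode(payload, "demo-secret", algorithm="HS256")
    claims = jwt.decode(token, "demo-secret", algorithms=["HS256"])
    # PyJWT serializes iat/exp to integer timestamps, so the configured
    # lifetime comes back as plain seconds
    assert claims["exp"] - claims["iat"] == timedelta(minutes=30).total_seconds()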
async def test_create_long_lived_access_token(mock_hass):
"""Test refresh_token's jwt_key changed for long-lived access token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=300),
)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token = manager.async_create_access_token(refresh_token)
    jwt_payload = jwt.decode(access_token, refresh_token.jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(days=300).total_seconds()
)
async def test_one_long_lived_access_token_per_refresh_token(mock_hass):
"""Test one refresh_token can only have one long-lived access token."""
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
assert refresh_token.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token = manager.async_create_access_token(refresh_token)
jwt_key = refresh_token.jwt_key
rt = await manager.async_validate_access_token(access_token)
assert rt.id == refresh_token.id
with pytest.raises(ValueError):
await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
await manager.async_remove_refresh_token(refresh_token)
assert refresh_token.id not in user.refresh_tokens
rt = await manager.async_validate_access_token(access_token)
assert rt is None, "Previous issued access token has been invoked"
refresh_token_2 = await manager.async_create_refresh_token(
user,
client_name="GPS Logger",
token_type=auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=3000),
)
assert refresh_token_2.id != refresh_token.id
assert refresh_token_2.token_type == auth_models.TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN
access_token_2 = manager.async_create_access_token(refresh_token_2)
jwt_key_2 = refresh_token_2.jwt_key
assert access_token != access_token_2
assert jwt_key != jwt_key_2
rt = await manager.async_validate_access_token(access_token_2)
    jwt_payload = jwt.decode(access_token_2, rt.jwt_key, algorithms=["HS256"])
assert jwt_payload["iss"] == refresh_token_2.id
assert (
jwt_payload["exp"] - jwt_payload["iat"] == timedelta(days=3000).total_seconds()
)
async def test_login_with_auth_module(mock_hass):
"""Test login as existing user with auth module."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
}
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
    # After the auth provider validates, the auth module's input form is requested
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "invalid-pin"}
)
# Invalid code error
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
assert step["errors"] == {"base": "invalid_code"}
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin"}
)
# Finally passed, get user
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_login_with_multi_auth_module(mock_hass):
"""Test login as existing user with multiple auth modules."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
},
{
"type": "insecure_example",
"id": "module2",
"data": [{"user_id": "mock-user", "pin": "test-pin2"}],
},
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
    # After the auth provider validates, the flow asks which auth module to use
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "select_mfa_module"
step = await manager.login_flow.async_configure(
step["flow_id"], {"multi_factor_auth_module": "module2"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin2"}
)
# Finally passed, get user
assert step["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step["result"]
assert user is not None
assert user.id == "mock-user"
assert user.is_owner is False
assert user.is_active is False
assert user.name == "Paulus"
async def test_auth_module_expired_session(mock_hass):
"""Test login as existing user."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[
{
"type": "insecure_example",
"data": [{"user_id": "mock-user", "pin": "test-pin"}],
}
],
)
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
step = await manager.login_flow.async_init(("insecure_example", None))
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
assert step["type"] == data_entry_flow.RESULT_TYPE_FORM
assert step["step_id"] == "mfa"
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow() + MFA_SESSION_EXPIRATION,
):
step = await manager.login_flow.async_configure(
step["flow_id"], {"pin": "test-pin"}
)
    # login flow aborts due to session timeout
assert step["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert step["reason"] == "login_expired"
async def test_enable_mfa_for_user(hass, hass_storage):
"""Test enable mfa module for user."""
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [{"username": "test-user", "password": "test-pass"}],
}
],
[{"type": "insecure_example", "data": []}],
)
step = await manager.login_flow.async_init(("insecure_example", None))
step = await manager.login_flow.async_configure(
step["flow_id"], {"username": "test-user", "password": "test-pass"}
)
user = step["result"]
assert user is not None
    # a new user doesn't have MFA enabled
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
module = manager.get_auth_mfa_module("insecure_example")
    # the MFA module doesn't have any data yet
assert bool(module._data) is False
# test enable mfa for user
await manager.async_enable_user_mfa(user, "insecure_example", {"pin": "test-pin"})
assert len(module._data) == 1
assert module._data[0] == {"user_id": user.id, "pin": "test-pin"}
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert "insecure_example" in modules
    # re-enabling MFA for the user overrides the existing entry
await manager.async_enable_user_mfa(
user, "insecure_example", {"pin": "test-pin-new"}
)
assert len(module._data) == 1
assert module._data[0] == {"user_id": user.id, "pin": "test-pin-new"}
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert "insecure_example" in modules
# system user cannot enable mfa
system_user = await manager.async_create_system_user("system-user")
with pytest.raises(ValueError):
await manager.async_enable_user_mfa(
system_user, "insecure_example", {"pin": "test-pin"}
)
assert len(module._data) == 1
modules = await manager.async_get_enabled_mfa(system_user)
assert len(modules) == 0
# disable mfa for user
await manager.async_disable_user_mfa(user, "insecure_example")
assert bool(module._data) is False
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
    # disabling MFA for a user who doesn't have it enabled fails silently
await manager.async_disable_user_mfa(user, "insecure_example")
async def test_async_remove_user(hass):
"""Test removing a user."""
events = []
@callback
def user_removed(event):
events.append(event)
hass.bus.async_listen("user_removed", user_removed)
manager = await auth.auth_manager_from_config(
hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
}
],
}
],
[],
)
hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id="mock-user", is_owner=False, is_active=False, name="Paulus"
).add_to_auth_manager(manager)
user.credentials.append(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=False,
)
)
assert len(user.credentials) == 1
await hass.auth.async_remove_user(user)
assert len(await manager.async_get_users()) == 0
assert len(user.credentials) == 0
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["user_id"] == user.id
async def test_new_users(mock_hass):
"""Test newly created users."""
manager = await auth.auth_manager_from_config(
mock_hass,
[
{
"type": "insecure_example",
"users": [
{
"username": "test-user",
"password": "test-pass",
"name": "Test Name",
},
{
"username": "test-user-2",
"password": "test-pass",
"name": "Test Name",
},
{
"username": "test-user-3",
"password": "test-pass",
"name": "Test Name",
},
],
}
],
[],
)
ensure_auth_manager_loaded(manager)
user = await manager.async_create_user("Hello")
# first user in the system is owner and admin
assert user.is_owner
assert user.is_admin
assert user.groups == []
user = await manager.async_create_user("Hello 2")
assert not user.is_admin
assert user.groups == []
user = await manager.async_create_user("Hello 3", ["system-admin"])
assert user.is_admin
assert user.groups[0].id == "system-admin"
user_cred = await manager.async_get_or_create_user(
auth_models.Credentials(
id="mock-id",
auth_provider_type="insecure_example",
auth_provider_id=None,
data={"username": "test-user"},
is_new=True,
)
)
assert user_cred.is_admin
|
apache-2.0
| -3,012,945,878,521,766,000 | -4,238,550,165,747,992,600 | 32.820942 | 88 | 0.581071 | false |
chintak/scikit-image
|
skimage/feature/util.py
|
1
|
4726
|
import numpy as np
from skimage.util import img_as_float
class FeatureDetector(object):
def __init__(self):
self.keypoints_ = np.array([])
def detect(self, image):
"""Detect keypoints in image.
Parameters
----------
image : 2D array
Input image.
"""
raise NotImplementedError()
class DescriptorExtractor(object):
def __init__(self):
self.descriptors_ = np.array([])
def extract(self, image, keypoints):
"""Extract feature descriptors in image for given keypoints.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint locations as ``(row, col)``.
"""
raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
"""
image1 = img_as_float(image1)
image2 = img_as_float(image2)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image)
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
color = np.random.rand(3, 1)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
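# A minimal sketch (not part of the original module) of wiring ``plot_matches``
# up with matplotlib. ``_demo_plot_matches`` is a hypothetical helper; the
# images, keypoints and matches below are random placeholders rather than the
# output of a real detector/matcher pipeline.
def _demo_plot_matches():
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    image1 = rng.rand(60, 80)
    image2 = rng.rand(50, 90)
    keypoints1 = rng.rand(10, 2) * 40
    keypoints2 = rng.rand(10, 2) * 40
    # pretend keypoint i in image1 corresponds to keypoint i in image2
    matches = np.column_stack([np.arange(10), np.arange(10)])
    fig, ax = plt.subplots()
    plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
                 matches_color='r')
    plt.show()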
def _prepare_grayscale_input_2D(image):
image = np.squeeze(image)
if image.ndim != 2:
raise ValueError("Only 2-D gray-scale images supported.")
return img_as_float(image)
def _mask_border_keypoints(image_shape, keypoints, distance):
"""Mask coordinates that are within certain distance from the image border.
Parameters
----------
image_shape : (2, ) array_like
Shape of the image as ``(rows, cols)``.
keypoints : (N, 2) array
Keypoint coordinates as ``(rows, cols)``.
distance : int
Image border distance.
Returns
-------
mask : (N, ) bool array
Mask indicating if pixels are within the image (``True``) or in the
border region of the image (``False``).
"""
rows = image_shape[0]
cols = image_shape[1]
mask = (((distance - 1) < keypoints[:, 0])
& (keypoints[:, 0] < (rows - distance + 1))
& ((distance - 1) < keypoints[:, 1])
& (keypoints[:, 1] < (cols - distance + 1)))
return mask
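# A minimal sketch (not part of the original module): keep only the keypoints
# at least ``distance`` pixels away from the border of a 10x10 image.
# ``_demo_mask_border`` is a hypothetical helper.
def _demo_mask_border():
    keypoints = np.array([[0, 0], [5, 5], [7, 7]])
    mask = _mask_border_keypoints((10, 10), keypoints, 2)
    # [0, 0] touches the border; the other two points are safely inside
    assert mask.tolist() == [False, True, True]
    return keypoints[mask]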
|
bsd-3-clause
| -7,205,358,475,343,874,000 | 7,904,585,647,556,870,000 | 28.354037 | 79 | 0.585485 | false |
mbrukman/delayed-replay
|
tests/proxy_test.py
|
1
|
1244
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Tests for functionality in the proxy.py file.
import proxy
import unittest
class GetTargetUrlTest(unittest.TestCase):
def testSimple(self):
cases = [
('foo/bar', '/?q=foo/bar'),
('/home/~user', '/?q=/home/%7Euser')
]
for expected, path in cases:
actual = proxy.GetTargetUrl(path)
if expected != actual:
print 'Failed conversion for %s' % path
print 'expected: %s' % expected
print ' actual: %s' % actual
      self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -9,171,445,366,459,338,000 | 4,425,266,674,708,120,600 | 29.341463 | 80 | 0.634244 | false |
rpatterson/test-har
|
test_har/tests/test_requests.py
|
1
|
3214
|
"""
Test using HAR files in Python tests against the requests library.
"""
import json
import requests
import requests_mock
from test_har import requests_har as test_har
from test_har import tests
class HARDogfoodRequestsTests(tests.HARDogfoodTestCase, test_har.HARTestCase):
"""
Test using HAR files in Python tests against the requests library.
"""
RESPONSE_TYPE = requests.Response
def setUp(self):
"""
Start the mocker, mock the example HAR response, and register cleanup.
"""
super(HARDogfoodRequestsTests, self).setUp()
self.mocker = requests_mock.Mocker()
self.mocker.start()
self.addCleanup(self.mocker.stop)
self.headers = test_har.array_to_dict(
self.entry["response"]["headers"])
self.headers['Content-Type'] = self.entry[
"response"]["content"]["mimeType"]
# Insert a key into the response
        # about which the HAR entry makes no assertion
content = dict(
self.entry["response"]["content"]["text"],
email='[email protected]')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=self.headers,
text=json.dumps(content))
def test_non_json(self):
"""
Mock the requests library non-JSON response.
"""
self.entry["response"]["content"]["mimeType"] = "text/html"
self.entry["response"]["content"]["text"] = (
'<html><body>Foo HTML body</body></html>')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=dict(self.headers, **{'Content-Type': self.entry[
"response"]["content"]["mimeType"]}),
text=self.entry["response"]["content"]["text"])
super(HARDogfoodRequestsTests, self).test_non_json()
def test_missing_content_type(self):
"""
Fail when the response is missing the content/MIME type.
"""
self.headers.pop('Content-Type')
self.mocker.post(
self.entry["request"]["url"],
status_code=self.entry["response"]["status"],
reason=self.entry["response"]["statusText"],
headers=self.headers,
text=json.dumps(self.entry["response"]["content"]["text"]))
with self.assertRaises(AssertionError) as har_failures:
self.assertHAR(self.example)
self.assertIn(
'content/mimeType', har_failures.exception.args[0],
'Assertion exception missing MIME type detail')
# BBB Python 2.7 str vs unicode compat
with self.assertRaises(AssertionError) as expected:
self.assertIn(
'Content-Type', self.headers,
'Missing response content type')
self.assertEqual(
har_failures.exception.args[0]['content/mimeType'].args,
expected.exception.args,
'Wrong missing response MIME type failure assertion')
|
gpl-3.0
| -4,652,546,929,563,303,000 | -8,315,890,082,952,089,000 | 35.11236 | 78 | 0.59552 | false |
pavlov99/jsonapi
|
jsonapi/utils.py
|
1
|
2220
|
""" JSON:API utils."""
class _classproperty(property):
""" Implement property behaviour for classes.
class A():
@_classproperty
@classmethod
def name(cls):
return cls.__name__
"""
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)()
def _cached(f):
""" Decorator that makes a method cached."""
attr_name = '_cached_' + f.__name__
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, attr_name):
setattr(obj, attr_name, f(obj, *args, **kwargs))
return getattr(obj, attr_name)
return wrapper
classproperty = lambda f: _classproperty(classmethod(f))
cached_property = lambda f: property(_cached(f))
cached_classproperty = lambda f: classproperty(_cached(f))
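# A minimal sketch (not part of the original module) of how the helpers above
# behave; ``_demo_utils`` and its inner ``_DemoModel`` class are hypothetical,
# added purely for illustration.
def _demo_utils():
    class _DemoModel(object):
        computed = 0
        @classproperty
        def resource_name(cls):
            # available on the class itself, no instance required
            return cls.__name__.lower()
        @cached_property
        def answer(self):
            # the body runs once per instance; the result is then cached
            _DemoModel.computed += 1
            return 42
    assert _DemoModel.resource_name == '_demomodel'
    demo = _DemoModel()
    assert (demo.answer, demo.answer) == (42, 42)
    assert _DemoModel.computed == 1  # the second access hit the cache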
class Choices(object):
""" Choices."""
def __init__(self, *choices):
self._choices = []
self._choice_dict = {}
for choice in choices:
if isinstance(choice, (list, tuple)):
if len(choice) == 2:
choice = (choice[0], choice[1], choice[1])
elif len(choice) != 3:
                    raise ValueError(
                        "Choices can't handle a list/tuple of length {0}, "
                        "only 2 or 3".format(choice))
else:
choice = (choice, choice, choice)
self._choices.append((choice[0], choice[2]))
self._choice_dict[choice[1]] = choice[0]
def __getattr__(self, attname):
try:
return self._choice_dict[attname]
except KeyError:
raise AttributeError(attname)
def __iter__(self):
return iter(self._choices)
def __getitem__(self, index):
return self._choices[index]
def __delitem__(self, index):
del self._choices[index]
def __setitem__(self, index, value):
self._choices[index] = value
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
self._choices
)
def __len__(self):
return len(self._choices)
def __contains__(self, element):
return element in self._choice_dict.values()
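# A minimal sketch (not part of the original module) of the three accepted
# choice shapes; ``_demo_choices`` is a hypothetical helper.
def _demo_choices():
    status = Choices(
        'draft',                      # bare value: value == name == label
        ('pub', 'published'),         # 2-tuple: (value, name-and-label)
        (0, 'archived', 'Archived'),  # 3-tuple: (value, name, label)
    )
    assert status.draft == 'draft'
    assert status.published == 'pub'
    assert status.archived == 0
    assert list(status) == [('draft', 'draft'), ('pub', 'published'),
                            (0, 'Archived')]
    assert 'pub' in status  # membership is checked against stored values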
|
mit
| -5,317,807,033,620,740,000 | -8,083,565,052,920,985,000 | 23.94382 | 79 | 0.530631 | false |
NicWayand/xray
|
xarray/plot/utils.py
|
1
|
6442
|
import pkg_resources
import numpy as np
import pandas as pd
from ..core.pycompat import basestring
def _load_default_cmap(fname='default_colormap.csv'):
"""
Returns viridis color map
"""
from matplotlib.colors import LinearSegmentedColormap
# Not sure what the first arg here should be
f = pkg_resources.resource_stream(__name__, fname)
cm_data = pd.read_csv(f, header=None).values
return LinearSegmentedColormap.from_list('viridis', cm_data)
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = 'both'
elif extend_min:
extend = 'min'
elif extend_max:
extend = 'max'
else:
extend = 'neither'
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = 'max'
if extend == 'both':
ext_n = 2
elif extend in ['min', 'max']:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(
levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, 'name', cmap)
return new_cmap, cnorm
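# A minimal sketch (not part of the original module): four level edges yield
# three discrete colors plus a norm that maps values into those bins.
# ``_demo_discrete_cmap`` is a hypothetical helper.
def _demo_discrete_cmap():
    levels = [0, 1, 2, 3]
    cmap, cnorm = _build_discrete_cmap('RdBu_r', levels, extend='neither',
                                       filled=True)
    assert cmap.N == 3      # len(levels) - 1 colors, nothing extended
    assert cnorm(0.5) == 0  # 0.5 falls into the first bin
    assert cnorm(2.5) == 2  # 2.5 falls into the last bin
    return cmap, cnorm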
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1., n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except ImportError:
# if that fails, use matplotlib
# in this case, is there any difference between mpl and seaborn?
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, basestring):
# we have some sort of named palette
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ImportError, ValueError):
# ValueError is raised when seaborn doesn't like a colormap
# (e.g. jet). If that fails, use matplotlib
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
except ValueError:
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
center=None, robust=False, extend=None,
levels=None, filled=True, cnorm=None):
"""
Use some heuristics to set good defaults for colorbar and range.
Adapted from Seaborn:
https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
ROBUST_PERCENTILE = 2.0
import matplotlib as mpl
calc_data = np.ravel(plot_data[~pd.isnull(plot_data)])
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# vlim might be computed below
vlim = None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
# kwargs not specific about divergent or not: infer defaults from data
divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = "RdBu_r"
else:
cmap = "viridis"
# Allow viridis before matplotlib 1.5
if cmap == "viridis":
cmap = _load_default_cmap()
# Handle discrete levels
if levels is not None:
if isinstance(levels, int):
ticker = mpl.ticker.MaxNLocator(levels)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None:
cmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend,
levels=levels, norm=cnorm)
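# A minimal sketch (not part of the original module) of what the heuristics
# above choose for data that crosses zero with no explicit vmin/vmax/center.
# ``_demo_cmap_params`` is a hypothetical helper.
def _demo_cmap_params():
    data = np.linspace(-1.0, 3.0, 100)
    params = _determine_cmap_params(data)
    # zero-crossing data gets a divergent map with limits symmetric around 0
    assert params['cmap'] == 'RdBu_r'
    assert (params['vmin'], params['vmax']) == (-3.0, 3.0)
    assert params['extend'] == 'neither'
    return params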
def _infer_xy_labels(darray, x, y):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array.
"""
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None or y is None:
raise ValueError('cannot supply only one of x and y')
elif any(k not in darray.coords for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
|
apache-2.0
| 3,632,735,148,708,384,300 | 4,576,225,351,753,868,300 | 28.686636 | 78 | 0.603695 | false |
j5shi/Thruster
|
pylibs/test/test_mutex.py
|
4
|
1034
|
import unittest
import test.test_support
mutex = test.test_support.import_module("mutex", deprecated=True)
class MutexTest(unittest.TestCase):
def test_lock_and_unlock(self):
def called_by_mutex(some_data):
self.assertEqual(some_data, "spam")
self.assertTrue(m.test(), "mutex not held")
# Nested locking
m.lock(called_by_mutex2, "eggs")
def called_by_mutex2(some_data):
self.assertEqual(some_data, "eggs")
self.assertTrue(m.test(), "mutex not held")
self.assertTrue(ready_for_2,
"called_by_mutex2 called too soon")
m = mutex.mutex()
        ready_for_2 = False
m.lock(called_by_mutex, "spam")
ready_for_2 = True
# unlock both locks
m.unlock()
m.unlock()
self.assertFalse(m.test(), "mutex still held")
def test_main():
test.test_support.run_unittest(MutexTest)
if __name__ == "__main__":
test_main()
|
gpl-2.0
| -7,666,017,146,036,764,000 | -6,740,635,409,432,989,000 | 27.542857 | 65 | 0.558027 | false |
zofuthan/edx-platform
|
common/lib/xmodule/xmodule/video_module/video_module.py
|
47
|
37504
|
# -*- coding: utf-8 -*-
# pylint: disable=abstract-method
"""Video is ungraded Xmodule for support video content.
It's new improved video module, which support additional feature:
- Can play non-YouTube video sources via in-browser HTML5 video player.
- YouTube defaults to HTML5 mode from the start.
- Speed changes in both YouTube and non-YouTube videos happen via
in-browser HTML5 video method (when in HTML5 mode).
- Navigational subtitles can be disabled altogether via an attribute
in XML.
Examples of html5 videos for manual testing:
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.mp4
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.webm
https://s3.amazonaws.com/edx-course-videos/edx-intro/edX-FA12-cware-1_100.ogv
"""
import copy
import json
import logging
import random
from collections import OrderedDict
from operator import itemgetter
from lxml import etree
from pkg_resources import resource_string
from django.conf import settings
from openedx.core.lib.cache_utils import memoize_in_request_cache
from xblock.core import XBlock
from xblock.fields import ScopeIds
from xblock.runtime import KvsFieldData
from xmodule.modulestore.inheritance import InheritanceKeyValueStore, own_metadata
from xmodule.x_module import XModule, module_attr
from xmodule.editing_module import TabsEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.xml_module import is_pointer_tag, name_to_pathname, deserialize_field
from xmodule.exceptions import NotFoundError
from .transcripts_utils import VideoTranscriptsMixin
from .video_utils import create_youtube_string, get_video_from_cdn, get_poster
from .bumper_utils import bumperize
from .video_xfields import VideoFields
from .video_handlers import VideoStudentViewHandlers, VideoStudioViewHandlers
from xmodule.video_module import manage_video_subtitles_save
from xmodule.mixin import LicenseMixin
# The following import/except block for edxval is a temporary measure until
# edxval is a proper XBlock Runtime Service.
#
# Here's the deal: the VideoModule should be able to take advantage of edx-val
# (https://github.com/edx/edx-val) to figure out what URL to give for video
# resources that have an edx_video_id specified. edx-val is a Django app, and
# including it causes tests to fail because we run common/lib tests standalone
# without Django dependencies. The alternatives seem to be:
#
# 1. Move VideoModule out of edx-platform.
# 2. Accept the Django dependency in common/lib.
# 3. Try to import, catch the exception on failure, and check for the existence
# of edxval_api before invoking it in the code.
# 4. Make edxval an XBlock Runtime Service
#
# (1) is a longer term goal. VideoModule should be made into an XBlock and
# extracted from edx-platform entirely. But that's expensive to do because of
# the various dependencies (like templates). Need to sort this out.
# (2) is explicitly discouraged.
# (3) is what we're doing today. The code is still functional when called within
# the context of the LMS, but does not cause failure on import when running
# standalone tests. Most VideoModule tests tend to be in the LMS anyway,
# probably for historical reasons, so we're not making things notably worse.
# (4) is one of the next items on the backlog for edxval, and should get rid
# of this particular import silliness. It's just that I haven't made one before,
# and I was worried about trying it with my deadline constraints.
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
try:
from branding.models import BrandingInfoConfig
except ImportError:
BrandingInfoConfig = None
log = logging.getLogger(__name__)
_ = lambda text: text
@XBlock.wants('settings')
class VideoModule(VideoFields, VideoTranscriptsMixin, VideoStudentViewHandlers, XModule, LicenseMixin):
"""
XML source example:
<video show_captions="true"
youtube="0.75:jNCf2gIqpeE,1.0:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg"
url_name="lecture_21_3" display_name="S19V3: Vacancies"
>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
</video>
"""
video_time = 0
icon_class = 'video'
    # To make sure that JS files are loaded in the proper order we use a
    # numerical index. We do that to avoid issues that occur in tests.
module = __name__.replace('.video_module', '', 2)
js = {
'js': [
resource_string(module, 'js/src/video/00_component.js'),
resource_string(module, 'js/src/video/00_video_storage.js'),
resource_string(module, 'js/src/video/00_resizer.js'),
resource_string(module, 'js/src/video/00_async_process.js'),
resource_string(module, 'js/src/video/00_i18n.js'),
resource_string(module, 'js/src/video/00_sjson.js'),
resource_string(module, 'js/src/video/00_iterator.js'),
resource_string(module, 'js/src/video/01_initialize.js'),
resource_string(module, 'js/src/video/025_focus_grabber.js'),
resource_string(module, 'js/src/video/02_html5_video.js'),
resource_string(module, 'js/src/video/03_video_player.js'),
resource_string(module, 'js/src/video/035_video_accessible_menu.js'),
resource_string(module, 'js/src/video/04_video_control.js'),
resource_string(module, 'js/src/video/04_video_full_screen.js'),
resource_string(module, 'js/src/video/05_video_quality_control.js'),
resource_string(module, 'js/src/video/06_video_progress_slider.js'),
resource_string(module, 'js/src/video/07_video_volume_control.js'),
resource_string(module, 'js/src/video/08_video_speed_control.js'),
resource_string(module, 'js/src/video/09_video_caption.js'),
resource_string(module, 'js/src/video/09_play_placeholder.js'),
resource_string(module, 'js/src/video/09_play_pause_control.js'),
resource_string(module, 'js/src/video/09_play_skip_control.js'),
resource_string(module, 'js/src/video/09_skip_control.js'),
resource_string(module, 'js/src/video/09_bumper.js'),
resource_string(module, 'js/src/video/09_save_state_plugin.js'),
resource_string(module, 'js/src/video/09_events_plugin.js'),
resource_string(module, 'js/src/video/09_events_bumper_plugin.js'),
resource_string(module, 'js/src/video/09_poster.js'),
resource_string(module, 'js/src/video/095_video_context_menu.js'),
resource_string(module, 'js/src/video/10_commands.js'),
resource_string(module, 'js/src/video/10_main.js')
]
}
css = {'scss': [
resource_string(module, 'css/video/display.scss'),
resource_string(module, 'css/video/accessible_menu.scss'),
]}
js_module_name = "Video"
def get_transcripts_for_student(self, transcripts):
"""Return transcript information necessary for rendering the XModule student view.
This is more or less a direct extraction from `get_html`.
Args:
transcripts (dict): A dict with all transcripts and a sub.
Returns:
Tuple of (track_url, transcript_language, sorted_languages)
track_url -> subtitle download url
transcript_language -> default transcript language
sorted_languages -> dictionary of available transcript languages
"""
track_url = None
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.download_track:
if self.track:
track_url = self.track
elif sub or other_lang:
track_url = self.runtime.handler_url(self, 'transcript', 'download').rstrip('/?')
transcript_language = self.get_default_transcript_language(transcripts)
native_languages = {lang: label for lang, label in settings.LANGUAGES if len(lang) == 2}
languages = {
lang: native_languages.get(lang, display)
for lang, display in settings.ALL_LANGUAGES
if lang in other_lang
}
if not other_lang or (other_lang and sub):
languages['en'] = 'English'
# OrderedDict for easy testing of rendered context in tests
sorted_languages = sorted(languages.items(), key=itemgetter(1))
sorted_languages = OrderedDict(sorted_languages)
return track_url, transcript_language, sorted_languages
def get_html(self):
transcript_download_format = self.transcript_download_format if not (self.download_track and self.track) else None
sources = filter(None, self.html5_sources)
download_video_link = None
branding_info = None
youtube_streams = ""
# If we have an edx_video_id, we prefer its values over what we store
# internally for download links (source, html5_sources) and the youtube
# stream.
if self.edx_video_id and edxval_api:
try:
val_profiles = ["youtube", "desktop_webm", "desktop_mp4"]
val_video_urls = edxval_api.get_urls_for_profiles(self.edx_video_id, val_profiles)
# VAL will always give us the keys for the profiles we asked for, but
# if it doesn't have an encoded video entry for that Video + Profile, the
# value will map to `None`
# add the non-youtube urls to the list of alternative sources
# use the last non-None non-youtube url as the link to download the video
for url in [val_video_urls[p] for p in val_profiles if p != "youtube"]:
if url:
if url not in sources:
sources.append(url)
if self.download_video:
download_video_link = url
# set the youtube url
if val_video_urls["youtube"]:
youtube_streams = "1.00:{}".format(val_video_urls["youtube"])
except edxval_api.ValInternalError:
# VAL raises this exception if it can't find data for the edx video ID. This can happen if the
# course data is ported to a machine that does not have the VAL data. So for now, pass on this
                # exception and fall back to whatever we find in the VideoDescriptor.
log.warning("Could not retrieve information from VAL for edx Video ID: %s.", self.edx_video_id)
# If the user comes from China use China CDN for html5 videos.
# 'CN' is China ISO 3166-1 country code.
# Video caching is disabled for Studio. User_location is always None in Studio.
# CountryMiddleware disabled for Studio.
cdn_url = getattr(settings, 'VIDEO_CDN_URL', {}).get(self.system.user_location)
if getattr(self, 'video_speed_optimizations', True) and cdn_url:
branding_info = BrandingInfoConfig.get_config().get(self.system.user_location)
for index, source_url in enumerate(sources):
new_url = get_video_from_cdn(cdn_url, source_url)
if new_url:
sources[index] = new_url
# If there was no edx_video_id, or if there was no download specified
# for it, we fall back on whatever we find in the VideoDescriptor
if not download_video_link and self.download_video:
if self.source:
download_video_link = self.source
elif self.html5_sources:
download_video_link = self.html5_sources[0]
track_url, transcript_language, sorted_languages = self.get_transcripts_for_student(self.get_transcripts_info())
# CDN_VIDEO_URLS is only to be used here and will be deleted
# TODO([email protected]): Delete this after the CDN experiment has completed.
html_id = self.location.html_id()
if self.system.user_location == 'CN' and \
settings.FEATURES.get('ENABLE_VIDEO_BEACON', False) and \
html_id in getattr(settings, 'CDN_VIDEO_URLS', {}).keys():
cdn_urls = getattr(settings, 'CDN_VIDEO_URLS', {})[html_id]
cdn_exp_group, new_source = random.choice(zip(range(len(cdn_urls)), cdn_urls))
if cdn_exp_group > 0:
sources[0] = new_source
cdn_eval = True
else:
cdn_eval = False
cdn_exp_group = None
self.youtube_streams = youtube_streams or create_youtube_string(self) # pylint: disable=W0201
settings_service = self.runtime.service(self, 'settings')
yt_api_key = None
if settings_service:
xblock_settings = settings_service.get_settings_bucket(self)
if xblock_settings and 'YOUTUBE_API_KEY' in xblock_settings:
yt_api_key = xblock_settings['YOUTUBE_API_KEY']
metadata = {
'saveStateUrl': self.system.ajax_url + '/save_user_state',
'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', False),
'streams': self.youtube_streams,
'sub': self.sub,
'sources': sources,
# This won't work when we move to data that
# isn't on the filesystem
'captionDataDir': getattr(self, 'data_dir', None),
'showCaptions': json.dumps(self.show_captions),
'generalSpeed': self.global_speed,
'speed': self.speed,
'savedVideoPosition': self.saved_video_position.total_seconds(),
'start': self.start_time.total_seconds(),
'end': self.end_time.total_seconds(),
'transcriptLanguage': transcript_language,
'transcriptLanguages': sorted_languages,
# TODO: Later on the value 1500 should be taken from some global
# configuration setting field.
'ytTestTimeout': 1500,
'ytApiUrl': settings.YOUTUBE['API'],
'ytMetadataUrl': settings.YOUTUBE['METADATA_URL'],
'ytKey': yt_api_key,
'transcriptTranslationUrl': self.runtime.handler_url(
self, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.runtime.handler_url(
self, 'transcript', 'available_translations'
).rstrip('/?'),
## For now, the option "data-autohide-html5" is hard coded. This option
## either enables or disables autohiding of controls and captions on mouse
## inactivity. If set to true, controls and captions will autohide for
## HTML5 sources (non-YouTube) after a period of mouse inactivity over the
## whole video. When the mouse moves (or a key is pressed while any part of
## the video player is focused), the captions and controls will be shown
## once again.
##
## There is no option in the "Advanced Editor" to set this option. However,
            ## this option will have an effect if changed to "True". The code on
            ## the front-end exists.
'autohideHtml5': False
}
bumperize(self)
context = {
'bumper_metadata': json.dumps(self.bumper['metadata']), # pylint: disable=E1101
'metadata': json.dumps(OrderedDict(metadata)),
'poster': json.dumps(get_poster(self)),
'branding_info': branding_info,
'cdn_eval': cdn_eval,
'cdn_exp_group': cdn_exp_group,
'id': self.location.html_id(),
'display_name': self.display_name_with_default,
'handout': self.handout,
'download_video_link': download_video_link,
'track': track_url,
'transcript_download_format': transcript_download_format,
'transcript_download_formats_list': self.descriptor.fields['transcript_download_format'].values,
'license': getattr(self, "license", None),
}
return self.system.render_template('video.html', context)
@XBlock.wants("request_cache")
@XBlock.wants("settings")
class VideoDescriptor(VideoFields, VideoTranscriptsMixin, VideoStudioViewHandlers,
TabsEditingDescriptor, EmptyDataRawDescriptor, LicenseMixin):
"""
Descriptor for `VideoModule`.
"""
module_class = VideoModule
transcript = module_attr('transcript')
show_in_read_only_mode = True
tabs = [
{
'name': _("Basic"),
'template': "video/transcripts.html",
'current': True
},
{
'name': _("Advanced"),
'template': "tabs/metadata-edit-tab.html"
}
]
def __init__(self, *args, **kwargs):
"""
Mostly handles backward compatibility issues.
`source` is deprecated field.
        a) If `source` exists and `source` is not in `html5_sources`: show `source`
field on front-end as not-editable but clearable. Dropdown is a new
field `download_video` and it has value True.
b) If `source` is cleared it is not shown anymore.
c) If `source` exists and `source` in `html5_sources`, do not show `source`
field. `download_video` field has value True.
"""
super(VideoDescriptor, self).__init__(*args, **kwargs)
# For backwards compatibility -- if we've got XML data, parse it out and set the metadata fields
if self.data:
field_data = self._parse_video_xml(etree.fromstring(self.data))
self._field_data.set_many(self, field_data)
del self.data
self.source_visible = False
if self.source:
# If `source` field value exist in the `html5_sources` field values,
# then delete `source` field value and use value from `html5_sources` field.
if self.source in self.html5_sources:
self.source = '' # Delete source field value.
self.download_video = True
else: # Otherwise, `source` field value will be used.
self.source_visible = True
if not self.fields['download_video'].is_set_on(self):
self.download_video = True
# Force download_video field to default value if it's not explicitly set for backward compatibility.
if not self.fields['download_video'].is_set_on(self):
self.download_video = self.download_video
self.force_save_fields(['download_video'])
        # For backward compatibility: if the course existed and was not
        # re-imported by the time the `download_track` field was added,
        # we should enable `download_track` if the following is true:
if not self.fields['download_track'].is_set_on(self) and self.track:
self.download_track = True
def editor_saved(self, user, old_metadata, old_content):
"""
        Used to update video values when `self` is saved from the CMS.
        old_metadata: dict, values of fields of `self` with scope=settings which were explicitly set by the user.
        old_content: same as `old_metadata` but for scope=content.
        Due to the nature of the code flow in item.py::_save_item, by the time this
        function is called the fields of the `self` instance have already been updated, but not yet saved.
        To obtain the values that were changed by user input,
        one should compare own_metadata(self) and old_metadata.
        The video player has two tabs, and due to the nature of the sync between tabs,
        metadata from the Basic tab is always sent when the video player is edited and saved for the first time, for example:
        {'youtube_id_1_0': u'3_yD_cEKoCk', 'display_name': u'Video', 'sub': u'3_yD_cEKoCk', 'html5_sources': []},
        which is why these fields will always be present in old_metadata after the first save. This should be fixed.
        On subsequent save requests html5_sources are always sent too, regardless of whether the user changed them.
        That means that html5_sources is always in the list of fields that were changed (the `metadata` param in save_item).
        This should be fixed too.
"""
metadata_was_changed_by_user = old_metadata != own_metadata(self)
if metadata_was_changed_by_user:
manage_video_subtitles_save(
self,
user,
old_metadata if old_metadata else None,
generate_translation=True
)
def save_with_metadata(self, user):
"""
Save module with updated metadata to database."
"""
self.save()
self.runtime.modulestore.update_item(self, user.id)
@property
def editable_metadata_fields(self):
editable_fields = super(VideoDescriptor, self).editable_metadata_fields
settings_service = self.runtime.service(self, 'settings')
if settings_service:
xb_settings = settings_service.get_settings_bucket(self)
if not xb_settings.get("licensing_enabled", False) and "license" in editable_fields:
del editable_fields["license"]
if self.source_visible:
editable_fields['source']['non_editable'] = True
else:
editable_fields.pop('source')
languages = [{'label': label, 'code': lang} for lang, label in settings.ALL_LANGUAGES if lang != u'en']
languages.sort(key=lambda l: l['label'])
editable_fields['transcripts']['languages'] = languages
editable_fields['transcripts']['type'] = 'VideoTranslations'
editable_fields['transcripts']['urlRoot'] = self.runtime.handler_url(self, 'studio_transcript', 'translation').rstrip('/?')
editable_fields['handout']['type'] = 'FileUploader'
return editable_fields
@classmethod
def from_xml(cls, xml_data, system, id_generator):
"""
Creates an instance of this descriptor from the supplied xml_data.
        This may be overridden by subclasses.
xml_data: A string of xml that will be translated into data and children for
this module
system: A DescriptorSystem for interacting with external resources
id_generator is used to generate course-specific urls and identifiers
"""
xml_object = etree.fromstring(xml_data)
url_name = xml_object.get('url_name', xml_object.get('slug'))
block_type = 'video'
definition_id = id_generator.create_definition(block_type, url_name)
usage_id = id_generator.create_usage(definition_id)
if is_pointer_tag(xml_object):
filepath = cls._format_filepath(xml_object.tag, name_to_pathname(url_name))
xml_object = cls.load_file(filepath, system.resources_fs, usage_id)
system.parse_asides(xml_object, definition_id, usage_id, id_generator)
field_data = cls._parse_video_xml(xml_object, id_generator)
kvs = InheritanceKeyValueStore(initial_values=field_data)
field_data = KvsFieldData(kvs)
video = system.construct_xblock_from_class(
cls,
# We're loading a descriptor, so student_id is meaningless
# We also don't have separate notions of definition and usage ids yet,
# so we use the location for both
ScopeIds(None, block_type, definition_id, usage_id),
field_data,
)
return video
def definition_to_xml(self, resource_fs):
"""
Returns an xml string representing this module.
"""
xml = etree.Element('video')
youtube_string = create_youtube_string(self)
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't need to write it out.
if youtube_string and youtube_string != '1.00:3_yD_cEKoCk':
xml.set('youtube', unicode(youtube_string))
xml.set('url_name', self.url_name)
attrs = {
'display_name': self.display_name,
'show_captions': json.dumps(self.show_captions),
'start_time': self.start_time,
'end_time': self.end_time,
'sub': self.sub,
'download_track': json.dumps(self.download_track),
'download_video': json.dumps(self.download_video),
}
for key, value in attrs.items():
# Mild workaround to ensure that tests pass -- if a field
# is set to its default value, we don't write it out.
if value:
if key in self.fields and self.fields[key].is_set_on(self):
xml.set(key, unicode(value))
for source in self.html5_sources:
ele = etree.Element('source')
ele.set('src', source)
xml.append(ele)
if self.track:
ele = etree.Element('track')
ele.set('src', self.track)
xml.append(ele)
if self.handout:
ele = etree.Element('handout')
ele.set('src', self.handout)
xml.append(ele)
# sorting for easy testing of resulting xml
for transcript_language in sorted(self.transcripts.keys()):
ele = etree.Element('transcript')
ele.set('language', transcript_language)
ele.set('src', self.transcripts[transcript_language])
xml.append(ele)
if self.edx_video_id and edxval_api:
try:
xml.append(edxval_api.export_to_xml(self.edx_video_id))
except edxval_api.ValVideoNotFoundError:
pass
# handle license specifically
self.add_license_to_xml(xml)
return xml
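    # Illustrative serialized output of definition_to_xml (attribute values below
    # are hypothetical):
    #   <video url_name="intro_video" youtube="1.00:p2Q6BrNhdh8" sub="p2Q6BrNhdh8">
    #     <source src="http://example.com/video.mp4"/>
    #   </video>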
def get_context(self):
"""
Extend context by data for transcript basic tab.
"""
_context = super(VideoDescriptor, self).get_context()
metadata_fields = copy.deepcopy(self.editable_metadata_fields)
display_name = metadata_fields['display_name']
video_url = metadata_fields['html5_sources']
youtube_id_1_0 = metadata_fields['youtube_id_1_0']
def get_youtube_link(video_id):
# First try a lookup in VAL. If we have a YouTube entry there, it overrides the
# one passed in.
if self.edx_video_id and edxval_api:
val_youtube_id = edxval_api.get_url_for_profile(self.edx_video_id, "youtube")
if val_youtube_id:
video_id = val_youtube_id
if video_id:
return 'http://youtu.be/{0}'.format(video_id)
else:
return ''
_ = self.runtime.service(self, "i18n").ugettext
video_url.update({
'help': _('The URL for your video. This can be a YouTube URL or a link to an .mp4, .ogg, or .webm video file hosted elsewhere on the Internet.'),
'display_name': _('Default Video URL'),
'field_name': 'video_url',
'type': 'VideoList',
'default_value': [get_youtube_link(youtube_id_1_0['default_value'])]
})
youtube_id_1_0_value = get_youtube_link(youtube_id_1_0['value'])
if youtube_id_1_0_value:
video_url['value'].insert(0, youtube_id_1_0_value)
metadata = {
'display_name': display_name,
'video_url': video_url
}
_context.update({'transcripts_basic_tab_metadata': metadata})
return _context
@classmethod
def _parse_youtube(cls, data):
"""
Parses a string of Youtube IDs such as "1.0:AXdE34_U,1.5:VO3SxfeD"
into a dictionary. Necessary for backwards compatibility with
XML-based courses.
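        For illustration (hypothetical IDs): the string
            "1.00:p2Q6BrNhdh8,1.50:kMyNdzVHHgg"
        parses to
            {'0.75': '', '1.00': 'p2Q6BrNhdh8', '1.25': '', '1.50': 'kMyNdzVHHgg'}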
"""
ret = {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
videos = data.split(',')
for video in videos:
pieces = video.split(':')
try:
speed = '%.2f' % float(pieces[0]) # normalize speed
# Handle the fact that youtube IDs got double-quoted for a period of time.
# Note: we pass in "VideoFields.youtube_id_1_0" so we deserialize as a String--
# it doesn't matter what the actual speed is for the purposes of deserializing.
youtube_id = deserialize_field(cls.youtube_id_1_0, pieces[1])
ret[speed] = youtube_id
except (ValueError, IndexError):
log.warning('Invalid YouTube ID: %s', video)
return ret
@classmethod
def _parse_video_xml(cls, xml, id_generator=None):
"""
Parse video fields out of xml_data. The fields are set if they are
present in the XML.
Arguments:
id_generator is used to generate course-specific urls and identifiers
"""
field_data = {}
# Convert between key types for certain attributes --
# necessary for backwards compatibility.
conversions = {
# example: 'start_time': cls._example_convert_start_time
}
# Convert between key names for certain attributes --
# necessary for backwards compatibility.
compat_keys = {
'from': 'start_time',
'to': 'end_time'
}
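        # Illustrative legacy markup (hypothetical values):
        #   <video from="5" to="15" youtube="1.00:p2Q6BrNhdh8"/>
        # Here compat_keys maps 'from'/'to' onto 'start_time'/'end_time'.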
sources = xml.findall('source')
if sources:
field_data['html5_sources'] = [ele.get('src') for ele in sources]
track = xml.find('track')
if track is not None:
field_data['track'] = track.get('src')
handout = xml.find('handout')
if handout is not None:
field_data['handout'] = handout.get('src')
transcripts = xml.findall('transcript')
if transcripts:
field_data['transcripts'] = {tr.get('language'): tr.get('src') for tr in transcripts}
for attr, value in xml.items():
if attr in compat_keys:
attr = compat_keys[attr]
if attr in cls.metadata_to_strip + ('url_name', 'name'):
continue
if attr == 'youtube':
speeds = cls._parse_youtube(value)
for speed, youtube_id in speeds.items():
# should have made these youtube_id_1_00 for
# cleanliness, but hindsight doesn't need glasses
normalized_speed = speed[:-1] if speed.endswith('0') else speed
# If the user has specified html5 sources, make sure we don't use the default video
if youtube_id != '' or 'html5_sources' in field_data:
field_data['youtube_id_{0}'.format(normalized_speed.replace('.', '_'))] = youtube_id
elif attr in conversions:
field_data[attr] = conversions[attr](value)
elif attr not in cls.fields:
field_data.setdefault('xml_attributes', {})[attr] = value
else:
# We export values with json.dumps (well, except for Strings, but
# for about a month we did it for Strings also).
field_data[attr] = deserialize_field(cls.fields[attr], value)
# For backwards compatibility: Add `source` if XML doesn't have `download_video`
# attribute.
if 'download_video' not in field_data and sources:
field_data['source'] = field_data['html5_sources'][0]
# For backwards compatibility: if XML doesn't have `download_track` attribute,
# it means that it is an old format. So, if `track` has some value,
# `download_track` needs to have value `True`.
if 'download_track' not in field_data and track is not None:
field_data['download_track'] = True
video_asset_elem = xml.find('video_asset')
if (
edxval_api and
video_asset_elem is not None and
'edx_video_id' in field_data
):
# Allow ValCannotCreateError to escape
edxval_api.import_from_xml(
video_asset_elem,
field_data['edx_video_id'],
course_id=getattr(id_generator, 'target_course_id', None)
)
# load license if it exists
field_data = LicenseMixin.parse_license_from_xml(field_data, xml)
return field_data
def index_dictionary(self):
xblock_body = super(VideoDescriptor, self).index_dictionary()
video_body = {
"display_name": self.display_name,
}
def _update_transcript_for_index(language=None):
""" Find video transcript - if not found, don't update index """
try:
transcripts = self.get_transcripts_info()
transcript = self.get_transcript(
transcripts, transcript_format='txt', lang=language
)[0].replace("\n", " ")
transcript_index_name = "transcript_{}".format(language if language else self.transcript_language)
video_body.update({transcript_index_name: transcript})
except NotFoundError:
pass
if self.sub:
_update_transcript_for_index()
# Check to see if there are transcripts in other languages besides default transcript
if self.transcripts:
for language in self.transcripts.keys():
_update_transcript_for_index(language)
if "content" in xblock_body:
xblock_body["content"].update(video_body)
else:
xblock_body["content"] = video_body
xblock_body["content_type"] = "Video"
return xblock_body
@property
def request_cache(self):
"""
Returns the request_cache from the runtime.
"""
return self.runtime.service(self, "request_cache")
@memoize_in_request_cache('request_cache')
def get_cached_val_data_for_course(self, video_profile_names, course_id):
"""
Returns the VAL data for the requested video profiles for the given course.
"""
return edxval_api.get_video_info_for_course_and_profiles(unicode(course_id), video_profile_names)
def student_view_json(self, context):
"""
Returns a JSON representation of the student_view of this XModule.
The contract of the JSON content is between the caller and the particular XModule.
"""
# If the "only_on_web" field is set on this video, do not return the rest of the video's data
        # in this json view, since this video is to be accessed only through its web view.
if self.only_on_web:
return {"only_on_web": True}
encoded_videos = {}
val_video_data = {}
# Check in VAL data first if edx_video_id exists
if self.edx_video_id:
video_profile_names = context.get("profiles", [])
# get and cache bulk VAL data for course
val_course_data = self.get_cached_val_data_for_course(video_profile_names, self.location.course_key)
val_video_data = val_course_data.get(self.edx_video_id, {})
# Get the encoded videos if data from VAL is found
if val_video_data:
encoded_videos = val_video_data.get('profiles', {})
# If information for this edx_video_id is not found in the bulk course data, make a
# separate request for this individual edx_video_id, unless cache misses are disabled.
# This is useful/required for videos that don't have a course designated, such as the introductory video
# that is shared across many courses. However, this results in a separate database request so watch
# out for any performance hit if many such videos exist in a course. Set the 'allow_cache_miss' parameter
# to False to disable this fall back.
elif context.get("allow_cache_miss", "True").lower() == "true":
try:
val_video_data = edxval_api.get_video_info(self.edx_video_id)
# Unfortunately, the VAL API is inconsistent in how it returns the encodings, so remap here.
for enc_vid in val_video_data.pop('encoded_videos'):
encoded_videos[enc_vid['profile']] = {key: enc_vid[key] for key in ["url", "file_size"]}
except edxval_api.ValVideoNotFoundError:
pass
# Fall back to other video URLs in the video module if not found in VAL
if not encoded_videos:
video_url = self.html5_sources[0] if self.html5_sources else self.source
if video_url:
encoded_videos["fallback"] = {
"url": video_url,
"file_size": 0, # File size is unknown for fallback URLs
}
transcripts_info = self.get_transcripts_info()
transcripts = {
lang: self.runtime.handler_url(self, 'transcript', 'download', query="lang=" + lang, thirdparty=True)
for lang in self.available_translations(transcripts_info, verify_assets=False)
}
return {
"only_on_web": self.only_on_web,
"duration": val_video_data.get('duration', None),
"transcripts": transcripts,
"encoded_videos": encoded_videos,
}
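    # Illustrative return value of student_view_json (hypothetical data):
    #   {"only_on_web": False, "duration": 120.5,
    #    "transcripts": {"en": ".../transcript/download?lang=en"},
    #    "encoded_videos": {"fallback": {"url": "http://example.com/v.mp4", "file_size": 0}}}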
|
agpl-3.0
| 9,168,950,232,499,573,000 | -8,702,335,138,789,660,000 | 44.294686 | 157 | 0.613455 | false |
shifter/grr
|
gui/api_value_renderers.py
|
4
|
12620
|
#!/usr/bin/env python
"""Renderers that render RDFValues into JSON compatible data structures."""
import base64
import inspect
import numbers
import logging
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import type_info
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
class ApiValueRenderer(object):
"""Baseclass for API renderers that render RDFValues."""
__metaclass__ = registry.MetaclassRegistry
value_class = object
_type_list_cache = {}
_renderers_cache = {}
@classmethod
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in ApiValueRenderer.classes.values():
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if inspect.isclass(value):
if aff4.issubclass(value_cls, candidate_class):
candidates.append((candidate, candidate_class))
else:
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
        raise RuntimeError("No renderer found for value %s." %
                           value_cls.__name__)
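      # Prefer the most specific renderer: sorting by the length of each
      # candidate value_class's MRO means that e.g. an rdf_protodict.Dict value,
      # which matches both ApiDictRenderer (dict) and ApiRDFDictRenderer,
      # ends up with the more derived ApiRDFDictRenderer.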
candidates = sorted(candidates,
key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(limit_lists=limit_lists)
def __init__(self, limit_lists=-1):
super(ApiValueRenderer, self).__init__()
self.limit_lists = limit_lists
def _PassThrough(self, value):
renderer = ApiValueRenderer.GetRendererForValueOrClass(
value, limit_lists=self.limit_lists)
return renderer.RenderValue(value)
def _IncludeTypeInfo(self, result, original_value):
# Converted value is placed in the resulting dictionary under the 'value'
# key.
if hasattr(original_value, "age"):
age = original_value.age.AsSecondsFromEpoch()
else:
age = 0
return dict(type=original_value.__class__.__name__,
value=result,
age=age)
def RenderValue(self, value):
"""Renders given value into plain old python objects."""
return self._IncludeTypeInfo(utils.SmartUnicode(value), value)
def RenderMetadata(self, value_cls):
"""Renders metadata of a given value class.
Args:
value_cls: Metadata of this class will be rendered. This class is
guaranteed to be (or to be a subclass of) value_class.
Returns:
Dictionary with class metadata.
"""
result = dict(name=value_cls.__name__,
mro=[klass.__name__ for klass in value_cls.__mro__],
doc=value_cls.__doc__ or "",
kind="primitive")
try:
default_value = RenderValue(value_cls())
result["default"] = default_value
except Exception as e: # pylint: disable=broad-except
logging.debug("Can't create default for primitive %s: %s",
value_cls.__name__, e)
return result
class ApiNumberRenderer(ApiValueRenderer):
"""Renderer for numbers."""
value_class = numbers.Number
def RenderValue(self, value):
# Always render ints as longs - so that there's no ambiguity in the UI
# renderers when type depends on the value.
if isinstance(value, int):
value = long(value)
return self._IncludeTypeInfo(value, value)
class ApiStringRenderer(ApiValueRenderer):
"""Renderer for strings."""
value_class = basestring
def RenderValue(self, value):
return self._IncludeTypeInfo(utils.SmartUnicode(value), value)
class ApiEnumRenderer(ApiValueRenderer):
"""Renderer for deprecated (old-style) enums."""
value_class = rdf_structs.Enum
def RenderValue(self, value):
return self._IncludeTypeInfo(value.name, value)
class ApiEnumNamedValueRenderer(ApiValueRenderer):
"""Renderer for new-style enums."""
value_class = rdf_structs.EnumNamedValue
def RenderValue(self, value):
return self._IncludeTypeInfo(value.name, value)
class ApiDictRenderer(ApiValueRenderer):
"""Renderer for dicts."""
value_class = dict
def RenderValue(self, value):
result = {}
for k, v in value.items():
result[k] = self._PassThrough(v)
return self._IncludeTypeInfo(result, value)
class ApiRDFDictRenderer(ApiDictRenderer):
"""Renderer for RDF Dict instances."""
value_class = rdf_protodict.Dict
class FetchMoreLink(rdfvalue.RDFValue):
"""Stub used to display 'More data available...' link."""
class ApiListRenderer(ApiValueRenderer):
"""Renderer for lists."""
value_class = list
def RenderValue(self, value):
if self.limit_lists == 0:
return "<lists are omitted>"
elif self.limit_lists == -1:
return [self._PassThrough(v) for v in value]
else:
result = [self._PassThrough(v) for v in list(value)[:self.limit_lists]]
if len(value) > self.limit_lists:
result.append(dict(age=0,
type=FetchMoreLink.__name__,
url="to/be/implemented"))
return result
class ApiTupleRenderer(ApiListRenderer):
"""Renderer for tuples."""
value_class = tuple
class ApiSetRenderer(ApiListRenderer):
"""Renderer for sets."""
value_class = set
class ApiRepeatedFieldHelperRenderer(ApiListRenderer):
"""Renderer for repeated fields helpers."""
value_class = rdf_structs.RepeatedFieldHelper
class ApiRDFValueArrayRenderer(ApiListRenderer):
"""Renderer for RDFValueArray."""
value_class = rdf_protodict.RDFValueArray
class ApiRDFBoolRenderer(ApiValueRenderer):
"""Renderer for RDFBool."""
value_class = rdfvalue.RDFBool
def RenderValue(self, value):
return self._IncludeTypeInfo(value != 0, value)
class ApiRDFBytesRenderer(ApiValueRenderer):
"""Renderer for RDFBytes."""
value_class = rdfvalue.RDFBytes
def RenderValue(self, value):
result = base64.b64encode(value.SerializeToString())
return self._IncludeTypeInfo(result, value)
class ApiRDFStringRenderer(ApiValueRenderer):
"""Renderer for RDFString."""
value_class = rdfvalue.RDFString
def RenderValue(self, value):
result = utils.SmartUnicode(value)
return self._IncludeTypeInfo(result, value)
class ApiRDFIntegerRenderer(ApiValueRenderer):
"""Renderer for RDFInteger."""
value_class = rdfvalue.RDFInteger
def RenderValue(self, value):
result = int(value)
return self._IncludeTypeInfo(result, value)
class ApiFlowStateRenderer(ApiValueRenderer):
"""Renderer for FlowState."""
value_class = rdf_flows.FlowState
def RenderValue(self, value):
return self._PassThrough(value.data)
class ApiDataBlobRenderer(ApiValueRenderer):
"""Renderer for DataBlob."""
value_class = rdf_protodict.DataBlob
def RenderValue(self, value):
return self._PassThrough(value.GetValue())
class ApiHashDigestRenderer(ApiValueRenderer):
"""Renderer for hash digests."""
value_class = rdfvalue.HashDigest
def RenderValue(self, value):
result = utils.SmartStr(value)
return self._IncludeTypeInfo(result, value)
class ApiEmbeddedRDFValueRenderer(ApiValueRenderer):
"""Renderer for EmbeddedRDFValue."""
value_class = rdf_protodict.EmbeddedRDFValue
def RenderValue(self, value):
return self._PassThrough(value.payload)
class ApiRDFProtoStructRenderer(ApiValueRenderer):
"""Renderer for RDFProtoStructs."""
value_class = rdf_structs.RDFProtoStruct
value_processors = []
metadata_processors = []
def RenderValue(self, value):
result = value.AsDict()
for k, v in value.AsDict().items():
result[k] = self._PassThrough(v)
for processor in self.value_processors:
result = processor(self, result, value)
result = self._IncludeTypeInfo(result, value)
return result
def RenderMetadata(self, value_cls):
fields = []
for field_desc in value_cls.type_infos:
repeated = isinstance(field_desc, type_info.ProtoList)
if hasattr(field_desc, "delegate"):
field_desc = field_desc.delegate
field = {
"name": field_desc.name,
"index": field_desc.field_number,
"repeated": repeated,
"dynamic": isinstance(field_desc, type_info.ProtoDynamicEmbedded)
}
field_type = field_desc.type
if field_type is not None:
field["type"] = field_type.__name__
if field_type.context_help_url:
field["context_help_url"] = field_type.context_help_url
if field_type == rdf_structs.EnumNamedValue:
allowed_values = []
for enum_label in sorted(field_desc.enum, key=field_desc.enum.get):
enum_value = field_desc.enum[enum_label]
labels = [rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
for x in enum_value.labels or []]
allowed_values.append(dict(name=enum_label,
value=int(enum_value),
labels=labels,
doc=enum_value.description))
field["allowed_values"] = allowed_values
field_default = None
if (field_desc.default is not None
and not aff4.issubclass(field_type, rdf_structs.RDFStruct)
and hasattr(field_desc, "GetDefault")):
field_default = field_desc.GetDefault()
field["default"] = RenderValue(field_default)
if field_desc.description:
field["doc"] = field_desc.description
if field_desc.friendly_name:
field["friendly_name"] = field_desc.friendly_name
if field_desc.labels:
field["labels"] = [rdf_structs.SemanticDescriptor.Labels.reverse_enum[x]
for x in field_desc.labels]
fields.append(field)
for processor in self.metadata_processors:
fields = processor(self, fields)
result = dict(name=value_cls.__name__,
mro=[klass.__name__ for klass in value_cls.__mro__],
doc=value_cls.__doc__ or "",
fields=fields,
kind="struct")
if getattr(value_cls, "union_field", None):
result["union_field"] = value_cls.union_field
struct_default = None
try:
struct_default = value_cls()
except Exception as e: # pylint: disable=broad-except
# TODO(user): Some RDFStruct classes can't be constructed using
# default constructor (without arguments). Fix the code so that
# we can either construct all the RDFStruct classes with default
# constructors or know exactly which classes can't be constructed
# with default constructors.
logging.debug("Can't create default for struct %s: %s",
                    value_cls.__name__, e)
if struct_default is not None:
result["default"] = RenderValue(struct_default)
return result
class ApiGrrMessageRenderer(ApiRDFProtoStructRenderer):
"""Renderer for GrrMessage objects."""
value_class = rdf_flows.GrrMessage
def RenderPayload(self, result, value):
"""Renders GrrMessage payload and renames args_rdf_name field."""
if "args_rdf_name" in result:
result["payload_type"] = result["args_rdf_name"]
del result["args_rdf_name"]
if "args" in result:
result["payload"] = self._PassThrough(value.payload)
del result["args"]
return result
def RenderPayloadMetadata(self, fields):
"""Payload-aware metadata processor."""
for f in fields:
if f["name"] == "args_rdf_name":
f["name"] = "payload_type"
if f["name"] == "args":
f["name"] = "payload"
return fields
value_processors = [RenderPayload]
metadata_processors = [RenderPayloadMetadata]
def RenderValue(value, limit_lists=-1):
"""Render given RDFValue as plain old python objects."""
if value is None:
return None
renderer = ApiValueRenderer.GetRendererForValueOrClass(
value, limit_lists=limit_lists)
return renderer.RenderValue(value)
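# Minimal usage sketch (hypothetical value):
#   RenderValue(rdfvalue.RDFString("foo"))
# returns a dict along the lines of
#   {"type": "RDFString", "value": u"foo", "age": 0}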
def RenderTypeMetadata(value_cls):
renderer = ApiValueRenderer.GetRendererForValueOrClass(value_cls)
return renderer.RenderMetadata(value_cls)
|
apache-2.0
| 6,144,866,955,271,469,000 | 1,910,851,381,231,771,000 | 26.797357 | 80 | 0.662203 | false |
xozzo/pyfootball
|
setup.py
|
1
|
1257
|
from setuptools import setup, find_packages
import os
if os.path.exists('README.rst'):
readme_path = 'README.rst'
else:
readme_path = 'README.md'
setup(
name='pyfootball',
version='1.0.1',
description='A client library for the football-data.org REST API',
long_description=open(readme_path).read(),
url='https://github.com/xozzo/pyfootball',
author='Timothy Ng',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
keywords='api wrapper client library football data',
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'venv']),
install_requires=['requests'],
test_suite='tests',
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev]
extras_require={
'dev': ['sphinx', 'sphinx-autobuild']
}
)
|
mit
| -3,872,312,667,529,373,700 | 8,181,063,739,811,245,000 | 27.568182 | 73 | 0.6428 | false |
feroda/django
|
tests/template_tests/templatetags/custom.py
|
42
|
4823
|
import operator
import warnings
from django import template
from django.template.defaultfilters import stringfilter
from django.utils import six
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
"""A noop filter that always return its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
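# Illustrative template usage (hypothetical values):
#   {% simple_unlimited_args_kwargs 37 42 56 89 eggs=1 spam=2 %}
# renders as:
#   simple_unlimited_args_kwargs - Expected result: 37, 42, 56, 89 / eggs=1, spam=2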
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
|
bsd-3-clause
| 5,177,759,513,438,584,000 | 336,648,686,767,823,500 | 32.964789 | 125 | 0.701223 | false |
frankk00/realtor
|
oauth_provider/oauth.py
|
1
|
23473
|
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
    def __init__(self, message='OAuth error occurred.'):
self.message = message
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.iteritems()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
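    # For illustration (hypothetical values): the parameters
    #   {'b': '2', 'a': '1', 'oauth_nonce': '4572616e'}
    # normalize to 'a=1&b=2&oauth_nonce=4572616e' (escaped, sorted, '&'-joined).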
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
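    # Illustrative header fragment (hypothetical values):
    #   'oauth_consumer_key="key", oauth_signature="sig%3D", realm="x"'
    # splits into {'oauth_consumer_key': 'key', 'oauth_signature': 'sig='};
    # the realm parameter is skipped.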
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        logger.warning("!!! IN OAuthServer.fetch_access_token OAuth Params: %s"
                       % oauth_request.parameters)
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
logging.error("key: %s",key)
logging.error("base: %s",base)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = abs(now - timestamp)
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
logging.info("Built signature: %s"%(built))
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
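    # For illustration (hypothetical secrets): a consumer secret 'cs' and token
    # secret 'ts' produce the signing key 'cs&ts'; raw is
    # 'GET&<escaped url>&<escaped normalized parameters>'.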
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key
|
bsd-3-clause
| -5,663,535,957,237,510,000 | 5,918,559,246,789,966,000 | 34.35241 | 105 | 0.615388 | false |
fsschneider/DeepOBS
|
deepobs/tensorflow/datasets/two_d.py
|
1
|
4790
|
# -*- coding: utf-8 -*-
"""2D DeepOBS dataset."""
import numpy as np
import tensorflow as tf
from . import dataset
class two_d(dataset.DataSet):
"""DeepOBS data set class to create two dimensional stochastic testproblems.
This toy data set consists of a fixed number (``train_size``) of iid draws
from two scalar zero-mean normal distributions with standard deviation
specified by the ``noise_level``.
Args:
batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
      is not a divisor of the dataset size (``train_size`` for train and test) the
remainder is dropped in each epoch (after shuffling).
train_size (int): Size of the training data set. This will also be used as
      the train_eval and test set size. Defaults to ``10000``.
noise_level (float): Standard deviation of the data points around the mean.
The data points are drawn from a Gaussian distribution. Defaults to
``1.0``.
Attributes:
batch: A tuple ``(x, y)`` of tensors with random x and y that can be used to
create a noisy two dimensional testproblem. Executing these
tensors raises a ``tf.errors.OutOfRangeError`` after one epoch.
train_init_op: A tensorflow operation initializing the dataset for the
training phase.
train_eval_init_op: A tensorflow operation initializing the testproblem for
evaluating on training data.
test_init_op: A tensorflow operation initializing the testproblem for
evaluating on test data.
phase: A string-value tf.Variable that is set to "train", "train_eval" or
"test", depending on the current phase. This can be used by testproblems
to adapt their behavior to this phase.
"""
def __init__(self, batch_size, train_size=10000, noise_level=1.0):
"""Creates a new 2D instance.
Args:
batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
        is not a divisor of the dataset size (``train_size`` for train and test) the
remainder is dropped in each epoch (after shuffling).
train_size (int): Size of the training data set. This will also be used as
        the train_eval and test set size. Defaults to ``10000``.
noise_level (float): Standard deviation of the data points around the mean.
The data points are drawn from a Gaussian distribution. Defaults to
``1.0``.
"""
self._name = "two_d"
self._train_size = train_size
self._noise_level = noise_level
super(two_d, self).__init__(batch_size)
def _make_dataset(self, data_x, data_y, shuffle=True):
"""Creates a 2D data set (helper used by ``.make_*_datset`` below).
Args:
data_x (np.array): Numpy array containing the ``X`` values of the
data points.
data_y (np.array): Numpy array containing the ``y`` values of the
data points.
shuffle (bool): Switch to turn on or off shuffling of the data set.
Defaults to ``True``.
Returns:
A tf.data.Dataset yielding batches of 2D data.
"""
with tf.name_scope(self._name):
with tf.device('/cpu:0'):
data = tf.data.Dataset.from_tensor_slices((data_x, data_y))
if shuffle:
data = data.shuffle(buffer_size=20000)
data = data.batch(self._batch_size, drop_remainder=True)
data = data.prefetch(buffer_size=4)
return data
def _make_train_dataset(self):
"""Creates the 2D training dataset.
Returns:
A tf.data.Dataset instance with batches of training data.
"""
# Draw data from a random generator with a fixed seed to always get the
# same data.
rng = np.random.RandomState(42)
data_x = rng.normal(0.0, self._noise_level, self._train_size)
data_y = rng.normal(0.0, self._noise_level, self._train_size)
data_x = np.float32(data_x)
data_y = np.float32(data_y)
return self._make_dataset(data_x, data_y, shuffle=True)
def _make_train_eval_dataset(self):
"""Creates the 2D train eval dataset.
Returns:
A tf.data.Dataset instance with batches of training eval data.
"""
return self._train_dataset.take(self._train_size // self._batch_size)
def _make_test_dataset(self):
"""Creates the 2D test dataset.
Returns:
A tf.data.Dataset instance with batches of test data.
"""
# recovers the deterministic 2D function using zeros
data_x, data_y = np.zeros(self._train_size), np.zeros(self._train_size)
data_x = np.float32(data_x)
data_y = np.float32(data_y)
return self._make_dataset(data_x, data_y, shuffle=False)
|
mit
| -8,942,848,130,044,451,000 | -3,534,458,057,298,765,000 | 40.652174 | 81 | 0.632359 | false |
rspavel/spack
|
lib/spack/external/py/_log/warning.py
|
218
|
2542
|
import py, sys
class DeprecationWarning(DeprecationWarning):
def __init__(self, msg, path, lineno):
self.msg = msg
self.path = path
self.lineno = lineno
def __repr__(self):
return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
def __str__(self):
return self.msg
def _apiwarn(startversion, msg, stacklevel=2, function=None):
# below is mostly COPIED from python2.4/warnings.py's def warn()
# Get context information
if isinstance(stacklevel, str):
frame = sys._getframe(1)
level = 1
found = frame.f_code.co_filename.find(stacklevel) != -1
while frame:
co = frame.f_code
if co.co_filename.find(stacklevel) == -1:
if found:
stacklevel = level
break
else:
found = True
level += 1
frame = frame.f_back
else:
stacklevel = 1
msg = "%s (since version %s)" %(msg, startversion)
warn(msg, stacklevel=stacklevel+1, function=function)
def warn(msg, stacklevel=1, function=None):
if function is not None:
filename = py.std.inspect.getfile(function)
lineno = py.code.getrawcode(function).co_firstlineno
else:
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
filename = filename[:-1]
elif fnl.endswith("$py.class"):
filename = filename.replace('$py.class', '.py')
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
path = py.path.local(filename)
warning = DeprecationWarning(msg, path, lineno)
py.std.warnings.warn_explicit(warning, category=Warning,
filename=str(warning.path),
lineno=warning.lineno,
registry=py.std.warnings.__dict__.setdefault(
"__warningsregistry__", {})
)
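# Illustrative call (hypothetical message):
#   _apiwarn("1.0", "use py.path.local instead")
# resolves the caller's file/line and emits a DeprecationWarning via warn().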
|
lgpl-2.1
| 4,947,566,459,161,752,000 | -7,571,219,410,460,630,000 | 32.447368 | 76 | 0.541699 | false |
googleinterns/learnbase
|
learnbase/src/main/webapp/WEB-INF/Lib/distutils/extension.py
|
250
|
10904
|
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
the full name of the extension, including any packages -- ie.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
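# Minimal construction sketch (illustrative; the module and file names below
# are hypothetical). In a setup script, Extension instances are normally
# passed to distutils.core.setup() via 'ext_modules':
#
#     from distutils.core import setup, Extension
#
#     setup(name='demo',
#           ext_modules=[Extension('demo', ['demomodule.c'],
#                                  include_dirs=['include'],
#                                  define_macros=[('NDEBUG', None)],
#                                  libraries=['m'])])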
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
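# e.g. (illustrative):   demo demomodule.c -I/usr/local/include -DDEMO=1 -lm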
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
module = words[0]
ext = Extension(module, [])
append_next_word = None
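# 'append_next_word' remembers which Extension list should receive the
# *next* word on the line; it implements two-word switches such as
# "-rpath DIR" and "-Xlinker ARG" below.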
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
ext.define_macros.append((value[0:equals],
                          value[equals+1:]))  # +1 keeps the whole value; +2 would drop its first character
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
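if __name__ == '__main__':
    # Illustrative round-trip (not part of the original module; the module
    # and file names are hypothetical): write a one-line Setup file, parse
    # it back, and show the resulting Extension.
    import tempfile
    _tmp = tempfile.NamedTemporaryFile(suffix='.setup', delete=False)
    _tmp.write('demo demomodule.c -I/usr/local/include -DDEMO=1 -lm\n')
    _tmp.close()
    try:
        for _ext in read_setup_file(_tmp.name):
            sys.stdout.write('%s <- sources=%r, libraries=%r\n'
                             % (_ext.name, _ext.sources, _ext.libraries))
    finally:
        os.unlink(_tmp.name)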
|
apache-2.0
| 4,399,066,553,695,931,400 | 3,443,264,542,691,127,300 | 41.760784 | 80 | 0.555576 | false |