repo_name stringlengths 5–100 | path stringlengths 4–375 | copies stringclasses 991 values | size stringlengths 4–7 | content stringlengths 666–1M | license stringclasses 15 values |
---|---|---|---|---|---|
justajeffy/arsenalsuite | python/pythondotnet/pythonnet/src/tests/test_class.py | 10 | 7151 | # ===========================================================================
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
# ===========================================================================
from System.Collections import Hashtable
from Python.Test import ClassTest
import sys, os, string, unittest, types
import Python.Test as Test
import System
class ClassTests(unittest.TestCase):
"""Test CLR class support."""
def testBasicReferenceType(self):
"""Test usage of CLR defined reference types."""
String = System.String
self.assertEquals(String.Empty, "")
def testBasicValueType(self):
"""Test usage of CLR defined value types."""
Int32 = System.Int32
self.assertEquals(Int32.MaxValue, 2147483647)
def testClassStandardAttrs(self):
"""Test standard class attributes."""
self.failUnless(ClassTest.__name__ == 'ClassTest')
self.failUnless(ClassTest.__module__ == 'Python.Test')
self.failUnless(type(ClassTest.__dict__) == types.DictProxyType)
self.failUnless(len(ClassTest.__doc__) > 0)
def testClassDocstrings(self):
"""Test standard class docstring generation"""
value = 'Void .ctor()'
self.failUnless(ClassTest.__doc__ == value)
def testClassDefaultStr(self):
"""Test the default __str__ implementation for managed objects."""
s = System.String("this is a test")
self.failUnless(str(s) == "this is a test")
def testClassDefaultRepr(self):
"""Test the default __repr__ implementation for managed objects."""
s = System.String("this is a test")
self.failUnless(repr(s).startswith("<System.String object"))
def testNonPublicClass(self):
"""Test that non-public classes are inaccessible."""
from Python import Test
def test():
from Python.Test import InternalClass
self.failUnlessRaises(ImportError, test)
def test():
x = Test.InternalClass
self.failUnlessRaises(AttributeError, test)
def testBasicSubclass(self):
"""Test basic subclass of a managed class."""
class MyTable(Hashtable):
def howMany(self):
return self.Count
table = MyTable()
self.failUnless(table.__class__.__name__.endswith('MyTable'))
self.failUnless(type(table).__name__.endswith('MyTable'))
self.failUnless(len(table.__class__.__bases__) == 1)
self.failUnless(table.__class__.__bases__[0] == Hashtable)
self.failUnless(table.howMany() == 0)
self.failUnless(table.Count == 0)
table.set_Item('one', 'one')
self.failUnless(table.howMany() == 1)
self.failUnless(table.Count == 1)
MyTable = None
def testSubclassWithNoArgConstructor(self):
"""Test subclass of a managed class with a no-arg constructor."""
from Python.Test import ClassCtorTest1
class SubClass(ClassCtorTest1):
def __init__(self, name):
self.name = name
# This failed in earlier versions
inst = SubClass('test')
def testSubclassWithVariousConstructors(self):
"""Test subclass of a managed class with various constructors."""
from Python.Test import ClassCtorTest2
class SubClass(ClassCtorTest2):
def __init__(self, v):
ClassCtorTest2.__init__(self)
self.value = v
inst = SubClass('test')
self.failUnless(inst.value == 'test')
class SubClass2(ClassCtorTest2):
def __init__(self, v):
ClassCtorTest2.__init__(self)
self.value = v
inst = SubClass2('test')
self.failUnless(inst.value == 'test')
def testStructConstruction(self):
"""Test construction of structs."""
from System.Drawing import Point
def test():
p = Point()
self.failUnlessRaises(TypeError, test)
p = Point(0, 0)
self.failUnless(p.X == 0)
self.failUnless(p.Y == 0)
p.X = 10
p.Y = 10
self.failUnless(p.X == 10)
self.failUnless(p.Y == 10)
# test strange __new__ interactions
# test weird metatype
# test recursion
# test
def testIEnumerableIteration(self):
"""Test iteration over objects supporting IEnumerable."""
list = Test.ClassTest.GetArrayList()
for item in list:
self.failUnless((item > -1) and (item < 10))
dict = Test.ClassTest.GetHashtable()
for item in dict:
cname = item.__class__.__name__
self.failUnless(cname.endswith('DictionaryEntry'))
def testIEnumeratorIteration(self):
"""Test iteration over objects supporting IEnumerator."""
chars = Test.ClassTest.GetEnumerator()
for item in chars:
self.failUnless(item in 'test string')
def testOverrideGetItem(self):
"""Test managed subclass overriding __getitem__."""
class MyTable(Hashtable):
def __getitem__(self, key):
value = Hashtable.__getitem__(self, key)
return 'my ' + str(value)
table = MyTable()
table['one'] = 'one'
table['two'] = 'two'
table['three'] = 'three'
self.failUnless(table['one'] == 'my one')
self.failUnless(table['two'] == 'my two')
self.failUnless(table['three'] == 'my three')
self.failUnless(table.Count == 3)
def testOverrideSetItem(self):
"""Test managed subclass overriding __setitem__."""
class MyTable(Hashtable):
def __setitem__(self, key, value):
value = 'my ' + str(value)
Hashtable.__setitem__(self, key, value)
table = MyTable()
table['one'] = 'one'
table['two'] = 'two'
table['three'] = 'three'
self.failUnless(table['one'] == 'my one')
self.failUnless(table['two'] == 'my two')
self.failUnless(table['three'] == 'my three')
self.failUnless(table.Count == 3)
class ClassicClass:
def kind(self):
return 'classic'
class NewStyleClass(object):
def kind(self):
return 'new-style'
def test_suite():
return unittest.makeSuite(ClassTests)
def main():
unittest.TextTestRunner().run(test_suite())
if __name__ == '__main__':
main()
| gpl-2.0 |
Kapeli/PopClip-Extensions | source/OneNote/requests/cookies.py | 204 | 16791 | # -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import time
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""Produce an appropriate Cookie header string to be sent with `request`, or None."""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
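# --- Illustrative sketch (added for clarity; not part of the original requests
# source). get_cookie_header() only needs an object exposing ``.url`` and a
# ``.headers`` mapping, such as a prepared request; the _FakeRequest class
# below is a hypothetical stand-in used purely to show the call shape.
def _example_get_cookie_header():
    class _FakeRequest(object):
        def __init__(self, url):
            self.url = url
            self.headers = {}
    jar = cookiejar_from_dict({'sessionid': 'abc123'})
    # expected to return something like 'sessionid=abc123', or None when no
    # cookie in the jar applies to the request
    return get_cookie_header(jar, _FakeRequest('http://example.com/'))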
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name == name:
if domain is None or domain == cookie.domain:
if path is None or path == cookie.path:
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific."""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1)."""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains."""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar. See itervalues() and iteritems()."""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar. See values() and items()."""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar. See iterkeys() and iteritems()."""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar. See keys() and items()."""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar. See iterkeys() and itervalues()."""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. See keys() and values(). Allows client-code to call
``dict(RequestsCookieJar)`` and get a vanilla python dict of key value
pairs."""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise."""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead."""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(cookie)
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values. Takes as
args name and optional domain and path. Returns a cookie.value. If
there are conflicting cookies, _find arbitrarily chooses one. See
_find_no_duplicates if you want an exception thrown if there are
conflicting cookies."""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests. Takes as args name and optional domain and
path. Returns a cookie.value. Throws KeyError if cookie is not found
and CookieConflictError if there are multiple cookies that match name
and optionally domain and path."""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
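# --- Illustrative usage sketch (added for clarity; not part of the original
# requests source). It shows the dict-like interface of RequestsCookieJar and
# how the optional domain/path arguments to get()/set() avoid a
# CookieConflictError when one jar spans several domains. The domain names
# used here are arbitrary examples.
def _example_requests_cookie_jar_usage():
    jar = RequestsCookieJar()
    # plain dict-style access works for simple cases
    jar['token'] = 'abc'
    assert jar['token'] == 'abc'
    # the same cookie name on two domains must be disambiguated explicitly
    jar.set('token', 'one', domain='one.example.com', path='/')
    jar.set('token', 'two', domain='two.example.com', path='/')
    assert jar.get('token', domain='two.example.com', path='/') == 'two'
    return jar.get_dict(domain='one.example.com')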
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
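# --- Illustrative sketch (added for clarity; not part of the original requests
# source): create_cookie() fills in sensible defaults, so a scoped cookie only
# needs the attributes you care about. The domain/path values are arbitrary
# examples, not defaults of the library.
def _example_create_cookie():
    c = create_cookie('sessionid', 'abc123',
                      domain='.example.com', path='/accounts', secure=True)
    return c.domain, c.path, c.secure   # ('.example.com', '/accounts', True)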
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
expires = time.time() + morsel['max-age']
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = time.mktime(
time.strptime(morsel['expires'], time_template)) - time.timezone
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
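# --- Illustrative sketch (added for clarity; not part of the original requests
# source): converting a standard-library Morsel, as produced by SimpleCookie,
# into a cookielib Cookie. The import fallback mirrors the Python 2/3 split
# otherwise handled by .compat; the cookie values are arbitrary examples.
def _example_morsel_to_cookie():
    try:
        from http.cookies import SimpleCookie   # Python 3
    except ImportError:
        from Cookie import SimpleCookie          # Python 2
    sc = SimpleCookie()
    sc['tasty_cookie'] = 'yum'
    sc['tasty_cookie']['domain'] = '.example.com'
    sc['tasty_cookie']['path'] = '/'
    return morsel_to_cookie(sc['tasty_cookie'])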
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
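# --- Illustrative sketch (added for clarity; not part of the original requests
# source): building a jar from a plain dict, then topping it up without
# overwriting cookies that are already present.
def _example_cookiejar_from_dict():
    jar = cookiejar_from_dict({'sessionid': 'abc123'})
    jar = cookiejar_from_dict({'sessionid': 'ignored', 'csrftoken': 'xyz'},
                              cookiejar=jar, overwrite=False)
    return jar.get_dict()   # {'sessionid': 'abc123', 'csrftoken': 'xyz'}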
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
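# --- Illustrative sketch (added for clarity; not part of the original requests
# source): merge_cookies() accepts either a dict or another CookieJar, and when
# given a dict it never overwrites cookies already present in the jar.
def _example_merge_cookies():
    jar = RequestsCookieJar()
    jar.set('a', '1')
    jar = merge_cookies(jar, {'a': 'ignored', 'b': '2'})
    other = cookiejar_from_dict({'c': '3'})
    jar = merge_cookies(jar, other)
    return sorted(jar.keys())   # ['a', 'b', 'c']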
| mit |
MariosPanag/coala | tests/processes/ProcessingTest.py | 11 | 30078 | import copy
import multiprocessing
import os
import platform
import queue
import re
import subprocess
import sys
import unittest
from pyprint.ConsolePrinter import ConsolePrinter
from coalib.bears.Bear import Bear
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.processes.Processing import (
ACTIONS, autoapply_actions, check_result_ignore, create_process_group,
execute_section, filter_raising_callables, get_default_actions,
get_file_dict, print_result, process_queues, simplify_section_result,
yield_ignore_ranges)
from coalib.results.HiddenResult import HiddenResult
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.results.result_actions.PrintDebugMessageAction import (
PrintDebugMessageAction)
from coalib.results.result_actions.ResultAction import ResultAction
from coalib.results.SourceRange import SourceRange
from coalib.settings.ConfigurationGathering import gather_configuration
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.misc.Caching import FileCache
process_group_test_code = """
import time, subprocess, os, platform, sys;
p=subprocess.Popen([sys.executable,
"-c",
"import time; time.sleep(0.1)"]);
pgid = p.pid if platform.system() == "Windows" else os.getpgid(p.pid);
print(p.pid, pgid)
p.terminate()
"""
class DummyProcess(multiprocessing.Process):
def __init__(self, control_queue, starts_dead=False):
multiprocessing.Process.__init__(self)
self.control_queue = control_queue
self.starts_dead = starts_dead
def is_alive(self):
return not self.control_queue.empty() and not self.starts_dead
class ProcessingTestLogPrinter(LogPrinter):
def __init__(self, log_queue):
LogPrinter.__init__(self, self)
self.log_queue = log_queue
self.set_up = False
def log_message(self, log_message, timestamp=None, **kwargs):
self.log_queue.put(log_message)
class ProcessingTest(unittest.TestCase):
def setUp(self):
config_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'section_executor_test_files',
'.coafile'))
self.testcode_c_path = os.path.join(os.path.dirname(config_path),
'testcode.c')
self.result_queue = queue.Queue()
self.queue = queue.Queue()
self.log_queue = queue.Queue()
self.console_printer = ConsolePrinter()
log_printer = LogPrinter(ConsolePrinter())
self.log_printer = ProcessingTestLogPrinter(self.log_queue)
(self.sections,
self.local_bears,
self.global_bears,
targets) = gather_configuration(lambda *args: True,
log_printer,
arg_list=['--config',
re.escape(config_path)])
self.assertEqual(len(self.local_bears['cli']), 1)
self.assertEqual(len(self.global_bears['cli']), 1)
self.assertEqual(targets, [])
def test_run(self):
self.sections['cli'].append(Setting('jobs', '1'))
cache = FileCache(self.log_printer, 'coala_test', flush_cache=True)
results = execute_section(self.sections['cli'],
self.global_bears['cli'],
self.local_bears['cli'],
lambda *args: self.result_queue.put(args[2]),
cache,
self.log_printer,
console_printer=self.console_printer)
self.assertTrue(results[0])
local_results = self.result_queue.get(timeout=0)
global_results = self.result_queue.get(timeout=0)
self.assertTrue(self.result_queue.empty())
self.assertEqual(len(local_results), 1)
self.assertEqual(len(global_results), 1)
# Result dict also returned
# One file
self.assertEqual(len(results[1]), 1)
# One global bear
self.assertEqual(len(results[2]), 1)
local_result = local_results[0]
global_result = global_results[0]
self.assertRegex(repr(local_result),
"<Result object\\(id={}, origin='LocalTestBear', aff"
'ected_code=\\(\\), severity=NORMAL, confidence=100'
", message='test msg', aspect=NoneType\\) at "
'0x[0-9a-fA-F]+>'.format(hex(local_result.id)))
self.assertRegex(repr(global_result),
"<Result object\\(id={}, origin='GlobalTestBear', "
'affected_code=\\(.*start=.*file=.*section_executor_'
'test_files.*line=None.*end=.*\\), severity=NORMAL, c'
"onfidence=100, message='test message', "
'aspect=NoneType\\'
') at 0x[0-9a-fA-F]+>'.format(hex(global_result.id)))
def test_empty_run(self):
execute_section(self.sections['cli'],
[],
[],
lambda *args: self.result_queue.put(args[2]),
None,
self.log_printer,
console_printer=self.console_printer)
self.sections['cli'].append(Setting('jobs', 'bogus!'))
results = execute_section(self.sections['cli'],
[],
[],
lambda *args: self.result_queue.put(args[2]),
None,
self.log_printer,
console_printer=self.console_printer)
# No results
self.assertFalse(results[0])
# One file
self.assertEqual(len(results[1]), 1)
# No global bear
self.assertEqual(len(results[2]), 0)
def test_process_queues(self):
ctrlq = queue.Queue()
# Append custom controlling sequences.
# Simulated process 1
ctrlq.put((CONTROL_ELEMENT.LOCAL, 1))
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.GLOBAL, 1))
# Simulated process 2
ctrlq.put((CONTROL_ELEMENT.LOCAL, 2))
# Simulated process 1
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
# Simulated process 2
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.GLOBAL, 1))
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
first_local = Result.from_values('o', 'The first result.', file='f')
second_local = Result.from_values('ABear',
'The second result.',
file='f',
line=1)
third_local = Result.from_values('ABear',
'The second result.',
file='f',
line=4)
fourth_local = Result.from_values('ABear',
'Another result.',
file='f',
line=7)
first_global = Result('o', 'The one and only global result.')
section = Section('')
section.append(Setting('min_severity', 'normal'))
process_queues(
[DummyProcess(control_queue=ctrlq) for i in range(3)],
ctrlq,
{1: [first_local,
second_local,
third_local,
# The following are to be ignored
Result('o', 'm', severity=RESULT_SEVERITY.INFO),
Result.from_values('ABear', 'u', 'f', 2, 1),
Result.from_values('ABear', 'u', 'f', 3, 1)],
2: [fourth_local,
# The following are to be ignored
HiddenResult('t', 'c'),
Result.from_values('ABear', 'u', 'f', 5, 1),
Result.from_values('ABear', 'u', 'f', 6, 1)]},
{1: [first_global]},
{'f': ['first line # stop ignoring, invalid ignore range\n',
'second line # ignore all\n',
'third line\n',
"fourth line # gnore shouldn't trigger without i!\n",
'# Start ignoring ABear, BBear and CBear\n',
'# Stop ignoring\n',
'seventh']},
lambda *args: self.queue.put(args[2]),
section,
None,
self.log_printer,
self.console_printer)
self.assertEqual(self.queue.get(timeout=0), ([second_local,
third_local]))
self.assertEqual(self.queue.get(timeout=0), ([fourth_local]))
self.assertEqual(self.queue.get(timeout=0), ([first_global]))
self.assertEqual(self.queue.get(timeout=0), ([first_global]))
def test_dead_processes(self):
ctrlq = queue.Queue()
# Not enough FINISH elements in the queue, processes start already dead
# Also queue elements are reversed
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
process_queues(
[DummyProcess(ctrlq, starts_dead=True) for i in range(3)],
ctrlq, {}, {}, {},
lambda *args: self.queue.put(args[2]),
Section(''),
None,
self.log_printer,
self.console_printer)
with self.assertRaises(queue.Empty):
self.queue.get(timeout=0)
# Not enough FINISH elements in the queue, processes start already dead
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
process_queues(
[DummyProcess(ctrlq, starts_dead=True) for i in range(3)],
ctrlq, {}, {}, {},
lambda *args: self.queue.put(args[2]),
Section(''),
None,
self.log_printer,
self.console_printer)
with self.assertRaises(queue.Empty):
self.queue.get(timeout=0)
def test_create_process_group(self):
p = create_process_group([sys.executable,
'-c',
process_group_test_code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
retval = p.wait()
if retval != 0:
for line in p.stderr:
print(line, end='')
raise Exception('Subprocess did not exit correctly')
output = [i for i in p.stdout]
p.stderr.close()
p.stdout.close()
pid, pgid = [int(i.strip()) for i_out in output for i in i_out.split()]
if platform.system() != 'Windows':
# There is no way of testing this on windows with the current
# python modules subprocess and os
self.assertEqual(p.pid, pgid)
def test_filter_raising_callables(self):
class A(Exception):
pass
class B(Exception):
pass
class C(Exception):
pass
def create_exception_raiser(exception):
def raiser(exc):
if exception in exc:
raise exception
return exception
return raiser
raiseA, raiseB, raiseC = (create_exception_raiser(exc)
for exc in [A, B, C])
test_list = [raiseA, raiseC, raiseB, raiseC]
self.assertEqual(list(filter_raising_callables(test_list, A, (A,))),
[C, B, C])
self.assertEqual(list(filter_raising_callables(test_list,
(B, C),
exc=(B, C))),
[A])
# Test whether non filtered exceptions bubble up.
with self.assertRaises(B):
list(filter_raising_callables(test_list, C, exc=(B, C)))
def test_get_file_dict(self):
file_dict = get_file_dict([self.testcode_c_path], self.log_printer)
self.assertEqual(len(file_dict), 1)
self.assertEqual(type(file_dict[self.testcode_c_path]),
tuple,
msg='files in file_dict should not be editable')
self.assertEqual('Files that will be checked:\n' + self.testcode_c_path,
self.log_printer.log_queue.get().message)
def test_get_file_dict_non_existent_file(self):
file_dict = get_file_dict(['non_existent_file'], self.log_printer)
self.assertEqual(file_dict, {})
self.assertIn(("Failed to read file 'non_existent_file' because of "
'an unknown error.'),
self.log_printer.log_queue.get().message)
def test_simplify_section_result(self):
results = (True,
{'file1': [Result('a', 'b')], 'file2': None},
{'file3': [Result('a', 'c')]},
None)
yielded, yielded_unfixed, all_results = simplify_section_result(results)
self.assertEqual(yielded, True)
self.assertEqual(yielded_unfixed, True)
self.assertEqual(len(all_results), 2)
def test_ignore_results(self):
ranges = [([], SourceRange.from_values('f', 1, 1, 2, 2))]
result = Result.from_values('origin (Something Specific)',
'message',
file='e',
line=1,
column=1,
end_line=2,
end_column=2)
self.assertFalse(check_result_ignore(result, ranges))
ranges.append(([], SourceRange.from_values('e', 2, 3, 3, 3)))
self.assertFalse(check_result_ignore(result, ranges))
ranges.append(([], SourceRange.from_values('e', 1, 1, 2, 2)))
self.assertTrue(check_result_ignore(result, ranges))
result1 = Result.from_values('origin', 'message', file='e')
self.assertTrue(check_result_ignore(result1, ranges))
ranges = [(['something', 'else', 'not origin'],
SourceRange.from_values('e', 1, 1, 2, 2))]
self.assertFalse(check_result_ignore(result, ranges))
ranges = [(['something', 'else', 'origin'],
SourceRange.from_values('e', 1, 1, 2, 2))]
self.assertTrue(check_result_ignore(result, ranges))
def test_ignore_glob(self):
result = Result.from_values('LineLengthBear',
'message',
file='d',
line=1,
column=1,
end_line=2,
end_column=2)
ranges = [(['(line*|space*)', 'py*'],
SourceRange.from_values('d', 1, 1, 2, 2))]
self.assertTrue(check_result_ignore(result, ranges))
result = Result.from_values('SpaceConsistencyBear',
'message',
file='d',
line=1,
column=1,
end_line=2,
end_column=2)
ranges = [(['(line*|space*)', 'py*'],
SourceRange.from_values('d', 1, 1, 2, 2))]
self.assertTrue(check_result_ignore(result, ranges))
result = Result.from_values('XMLBear',
'message',
file='d',
line=1,
column=1,
end_line=2,
end_column=2)
ranges = [(['(line*|space*)', 'py*'],
SourceRange.from_values('d', 1, 1, 2, 2))]
self.assertFalse(check_result_ignore(result, ranges))
def test_yield_ignore_ranges(self):
test_file_dict_a = {'f':
('# Ignore aBear\n',
'a_string = "This string should be ignored"\n')}
test_ignore_range_a = list(yield_ignore_ranges(test_file_dict_a))
for test_bears, test_source_range in test_ignore_range_a:
self.assertEqual(test_bears, ['abear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 43)
test_file_dict_b = {'f':
('# start Ignoring bBear\n',
'b_string = "This string should be ignored"\n',
'# stop ignoring\n')}
test_ignore_range_b = list(yield_ignore_ranges(test_file_dict_b))
for test_bears, test_source_range in test_ignore_range_b:
self.assertEqual(test_bears, ['bbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 3)
self.assertEqual(test_source_range.end.column, 16)
test_file_dict_c = {'f':
('# Start ignoring cBear\n',
'# Stop ignoring cBear This & prev ignored\n')}
test_ignore_range_c = list(yield_ignore_ranges(test_file_dict_c))
for test_bears, test_source_range in test_ignore_range_c:
self.assertEqual(test_bears, ['cbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 42)
test_file_dict_d = {'f':
('# Start ignoring cBear\n',
'All of this ignored\n')}
test_ignore_range_d = list(yield_ignore_ranges(test_file_dict_d))
for test_bears, test_source_range in test_ignore_range_d:
self.assertEqual(test_bears, ['cbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 20)
test_file_dict_e = {'f':
('# Ignore all\n',
'e_string = "This string should be ignored"\n')}
test_ignore_range_e = list(yield_ignore_ranges(test_file_dict_e))
for test_bears, test_source_range in test_ignore_range_e:
self.assertEqual(test_bears, [])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 43)
test_file_dict_n = {'f':
('# noqa nBear\n',
'n_string = "This string should be ignored"\n')}
test_ignore_range_n = list(yield_ignore_ranges(test_file_dict_n))
for test_bears, test_source_range in test_ignore_range_n:
self.assertEqual(test_bears, ['nbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 43)
test_file_dict_n = {'f':
('# noqa\n',
'n_string = "This string should be ignored"\n')}
test_ignore_range_n = list(yield_ignore_ranges(test_file_dict_n))
for test_bears, test_source_range in test_ignore_range_n:
self.assertEqual(test_bears, [])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 43)
# This case was a bug.
test_file_dict_single_line = {'f': ('# ignore XBEAR',)}
test_ignore_range_single_line = list(yield_ignore_ranges(
test_file_dict_single_line))
self.assertEqual(len(test_ignore_range_single_line), 1)
bears, source_range = test_ignore_range_single_line[0]
self.assertEqual(bears, ['xbear'])
self.assertEqual(source_range.start.line, 1)
self.assertEqual(source_range.start.column, 1)
self.assertEqual(source_range.end.line, 1)
self.assertEqual(source_range.end.column, 14)
def test_loaded_bears_with_error_result(self):
class BearWithMissingPrerequisites(Bear):
def __init__(self, section, queue, timeout=0.1):
Bear.__init__(self, section, queue, timeout)
def run(self):
return []
@classmethod
def check_prerequisites(cls):
return False
multiprocessing.Queue()
tmp_local_bears = copy.copy(self.local_bears['cli'])
tmp_local_bears.append(BearWithMissingPrerequisites)
cache = FileCache(self.log_printer,
'coala_test_on_error',
flush_cache=True)
results = execute_section(self.sections['cli'],
[],
tmp_local_bears,
lambda *args: self.result_queue.put(args[2]),
cache,
self.log_printer,
console_printer=self.console_printer)
self.assertEqual(len(cache.data), 0)
cache = FileCache(self.log_printer,
'coala_test_on_error',
flush_cache=False)
results = execute_section(self.sections['cli'],
[],
self.local_bears['cli'],
lambda *args: self.result_queue.put(args[2]),
cache,
self.log_printer,
console_printer=self.console_printer)
self.assertGreater(len(cache.data), 0)
class ProcessingTest_GetDefaultActions(unittest.TestCase):
def setUp(self):
self.section = Section('X')
def test_no_key(self):
self.assertEqual(get_default_actions(self.section), ({}, {}))
def test_no_value(self):
self.section.append(Setting('default_actions', ''))
self.assertEqual(get_default_actions(self.section), ({}, {}))
def test_only_valid_actions(self):
self.section.append(Setting(
'default_actions',
'MyBear: PrintDebugMessageAction, ValidBear: ApplyPatchAction'))
self.assertEqual(
get_default_actions(self.section),
({'MyBear': PrintDebugMessageAction,
'ValidBear': ApplyPatchAction},
{}))
def test_valid_and_invalid_actions(self):
self.section.append(Setting(
'default_actions',
'MyBear: INVALID_action, ValidBear: ApplyPatchAction, XBear: ABC'))
self.assertEqual(get_default_actions(self.section),
({'ValidBear': ApplyPatchAction},
{'MyBear': 'INVALID_action', 'XBear': 'ABC'}))
class ProcessingTest_AutoapplyActions(unittest.TestCase):
def setUp(self):
self.log_queue = queue.Queue()
self.log_printer = ProcessingTestLogPrinter(self.log_queue)
self.resultY = Result('YBear', 'msg1')
self.resultZ = Result('ZBear', 'msg2')
self.results = [self.resultY, self.resultZ]
self.section = Section('A')
def test_no_default_actions(self):
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertTrue(self.log_queue.empty())
def test_with_invalid_action(self):
self.section.append(Setting('default_actions',
'XBear: nonSENSE_action'))
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertEqual(self.log_queue.get().message,
"Selected default action 'nonSENSE_action' for bear "
"'XBear' does not exist. Ignoring action.")
self.assertTrue(self.log_queue.empty())
def test_without_default_action_and_unapplicable(self):
# Use a result where no default action is supplied for and another one
# where the action is not applicable.
old_is_applicable = ApplyPatchAction.is_applicable
ApplyPatchAction.is_applicable = (
lambda *args: 'The ApplyPatchAction cannot be applied'
)
self.section.append(Setting(
'default_actions',
'NoBear: ApplyPatchAction, YBear: ApplyPatchAction'))
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertEqual(self.log_queue.get().message,
'YBear: The ApplyPatchAction cannot be applied')
self.assertTrue(self.log_queue.empty())
ApplyPatchAction.is_applicable = old_is_applicable
self.section.append(Setting(
'no_autoapply_warn', True))
autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertTrue(self.log_queue.empty())
def test_applicable_action(self):
# Use a result whose action can be successfully applied.
log_printer = self.log_printer
class TestAction(ResultAction):
def apply(self, *args, **kwargs):
log_printer.debug('ACTION APPLIED SUCCESSFULLY.')
ACTIONS.append(TestAction)
self.section.append(Setting('default_actions', 'Z*: TestAction'))
ret = autoapply_actions(self.results,
{},
{},
self.section,
log_printer)
self.assertEqual(ret, [self.resultY])
self.assertEqual(self.log_queue.get().message,
'ACTION APPLIED SUCCESSFULLY.')
self.assertEqual(self.log_queue.get().message,
"Applied 'TestAction' "
"on the whole project from 'ZBear'.")
self.assertTrue(self.log_queue.empty())
ACTIONS.pop()
def test_failing_action(self):
class FailingTestAction(ResultAction):
def apply(self, *args, **kwargs):
raise RuntimeError("YEAH THAT'S A FAILING BEAR")
ACTIONS.append(FailingTestAction)
self.section.append(Setting('default_actions',
'YBear: FailingTestAction'))
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertEqual(self.log_queue.get().message,
"Failed to execute action 'FailingTestAction'"
" with error: YEAH THAT'S A FAILING BEAR.")
self.assertIn("YEAH THAT'S A FAILING BEAR",
self.log_queue.get().message)
self.assertEqual(self.log_queue.get().message,
'-> for result ' + repr(self.resultY) + '.')
self.assertTrue(self.log_queue.empty())
ACTIONS.pop()
class ProcessingTest_PrintResult(unittest.TestCase):
def setUp(self):
self.section = Section('name')
self.log_printer = LogPrinter(ConsolePrinter(), log_level=0)
self.console_printer = ConsolePrinter()
def test_autoapply_override(self):
"""
Tests that the default_actions aren't automatically applied when the
autoapply setting overrides that.
"""
self.section.append(Setting('default_actions',
'somebear: PrintDebugMessageAction'))
# Verify that it would apply the action, i.e. remove the result
results = [5, HiddenResult('origin', []),
Result('somebear', 'message', debug_msg='debug')]
retval, newres = print_result(results, {}, 0, lambda *args: None,
self.section, self.log_printer, {}, [],
console_printer=self.console_printer)
self.assertEqual(newres, [])
| agpl-3.0 |
daniponi/django | tests/messages_tests/base.py | 7 | 14098 | from django import http
from django.contrib.messages import constants, get_level, set_level, utils
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.constants import DEFAULT_LEVELS
from django.contrib.messages.storage import base, default_storage
from django.contrib.messages.storage.base import Message
from django.test import modify_settings, override_settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
storage_class = default_storage
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
),
},
}],
ROOT_URLCONF='messages_tests.urls',
MESSAGE_TAGS='',
MESSAGE_STORAGE='%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER='django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
messages = [Message(self.levels[level], msg) for msg in data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_template_response')
for level in self.levels.keys():
add_url = reverse('add_template_response', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertIn('messages', response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
def test_context_processor_message_levels(self):
show_url = reverse('show_template_response')
response = self.client.get(show_url)
self.assertIn('DEFAULT_MESSAGE_LEVELS', response.context)
self.assertEqual(response.context['DEFAULT_MESSAGE_LEVELS'], DEFAULT_LEVELS)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('show_message')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend(Message(self.levels[level], msg) for msg in data['messages'])
add_url = reverse('add_message', args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertIn('messages', response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
MESSAGE_LEVEL=constants.DEBUG,
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
with self.assertRaises(MessageFailure):
self.client.post(add_url, data, follow=True)
@modify_settings(
INSTALLED_APPS={'remove': 'django.contrib.messages'},
MIDDLEWARE_CLASSES={'remove': 'django.contrib.messages.middleware.MessageMiddleware'},
)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
}],
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('show_message')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('add_message', args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertNotIn('messages', response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', '', 'extra-tag debug', 'warning', 'error', 'success'])
def test_level_tag(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.level_tag for msg in storage]
self.assertEqual(tags, ['info', '', 'debug', 'warning', 'error', 'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags, ['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| bsd-3-clause |
kazitanvirahsan/django-registration | docs/conf.py | 21 | 6469 | # -*- coding: utf-8 -*-
#
# django-registration documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 22 02:57:42 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-registration-redux'
copyright = u'2007-2013, James Bennett. 2014, Andrew Cutler and others.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-registration-reduxdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-registration-redux.tex', u'django-registration-redux Documentation',
u'James Bennett', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
rst_epilog = '.. |project| replace:: %s' % project
| bsd-3-clause |
valdur55/py3status | py3status/modules/check_tcp.py | 2 | 1816 | """
Display status of a TCP port on a given host.
Configuration parameters:
cache_timeout: refresh interval for this module (default 10)
format: display format for this module (default '{host}:{port} {state}')
host: name of host to check for (default 'localhost')
icon_off: show this when unavailable (default 'DOWN')
icon_on: show this when available (default 'UP')
port: number of port to check for (default 22)
Format placeholders:
{state} port state
Color options:
color_down: Closed, default to color_bad
color_up: Open, default to color_good
@author obb, Moritz Lüdecke
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'localhost:22 UP'}
down
{'color': '#FF0000', 'full_text': u'localhost:22 DOWN'}
"""
import socket
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
format = "{host}:{port} {state}"
host = "localhost"
icon_off = "DOWN"
icon_on = "UP"
port = 22
def post_config_hook(self):
self.color_on = self.py3.COLOR_UP or self.py3.COLOR_GOOD
self.color_off = self.py3.COLOR_DOWN or self.py3.COLOR_BAD
def check_tcp(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((self.host, self.port))
if result:
color = self.color_off
state = self.icon_off
else:
color = self.color_on
state = self.icon_on
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(self.format, {"state": state}),
"color": color,
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause |
akvo/django-tastypie | tastypie/serializers.py | 34 | 19714 | from __future__ import unicode_literals
import datetime
import re
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.encoding import force_text, smart_bytes
from django.core.serializers import json as djangojson
from tastypie.bundle import Bundle
from tastypie.exceptions import BadRequest, UnsupportedFormat
from tastypie.utils import format_datetime, format_date, format_time, make_naive
try:
import defusedxml.lxml as lxml
from defusedxml.common import DefusedXmlException
from defusedxml.lxml import parse as parse_xml
from lxml.etree import Element, tostring, LxmlError, XMLParser
except ImportError:
lxml = None
try:
import yaml
from django.core.serializers import pyyaml
except ImportError:
yaml = None
try:
import biplist
except ImportError:
biplist = None
import json
XML_ENCODING = re.compile('<\?xml.*?\?>', re.IGNORECASE)
# Ugh & blah.
# So doing a regular dump is generally fine, since Tastypie doesn't usually
# serialize advanced types. *HOWEVER*, it will dump out Python Unicode strings
# as a custom YAML tag, which of course ``yaml.safe_load`` can't handle.
if yaml is not None:
from yaml.constructor import SafeConstructor
from yaml.loader import Reader, Scanner, Parser, Composer, Resolver
class TastypieConstructor(SafeConstructor):
def construct_yaml_unicode_dammit(self, node):
value = self.construct_scalar(node)
try:
return value.encode('ascii')
except UnicodeEncodeError:
return value
TastypieConstructor.add_constructor(u'tag:yaml.org,2002:python/unicode', TastypieConstructor.construct_yaml_unicode_dammit)
class TastypieLoader(Reader, Scanner, Parser, Composer, TastypieConstructor, Resolver):
def __init__(self, stream):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
Composer.__init__(self)
TastypieConstructor.__init__(self)
Resolver.__init__(self)
class Serializer(object):
"""
A swappable class for serialization.
This handles most types of data as well as the following output formats::
* json
* jsonp (Disabled by default)
* xml
* yaml
* html
* plist (see http://explorapp.com/biplist/)
    It was designed to make changing behavior easy, either by overriding the
various format methods (i.e. ``to_json``), by changing the
``formats/content_types`` options or by altering the other hook methods.
"""
formats = ['json', 'xml', 'yaml', 'html', 'plist']
content_types = {'json': 'application/json',
'jsonp': 'text/javascript',
'xml': 'application/xml',
'yaml': 'text/yaml',
'html': 'text/html',
'plist': 'application/x-plist'}
def __init__(self, formats=None, content_types=None, datetime_formatting=None):
if datetime_formatting is not None:
self.datetime_formatting = datetime_formatting
else:
self.datetime_formatting = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')
self.supported_formats = []
if content_types is not None:
self.content_types = content_types
if formats is not None:
self.formats = formats
if self.formats is Serializer.formats and hasattr(settings, 'TASTYPIE_DEFAULT_FORMATS'):
            # We want TASTYPIE_DEFAULT_FORMATS to override unmodified defaults but not intentional changes
# on Serializer subclasses:
self.formats = settings.TASTYPIE_DEFAULT_FORMATS
if not isinstance(self.formats, (list, tuple)):
raise ImproperlyConfigured('Formats should be a list or tuple, not %r' % self.formats)
for format in self.formats:
try:
self.supported_formats.append(self.content_types[format])
except KeyError:
raise ImproperlyConfigured("Content type for specified type '%s' not found. Please provide it at either the class level or via the arguments." % format)
def get_mime_for_format(self, format):
"""
Given a format, attempts to determine the correct MIME type.
If not available on the current ``Serializer``, returns
``application/json`` by default.
"""
try:
return self.content_types[format]
except KeyError:
return 'application/json'
def format_datetime(self, data):
"""
A hook to control how datetimes are formatted.
Can be overridden at the ``Serializer`` level (``datetime_formatting``)
or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).
Default is ``iso-8601``, which looks like "2010-12-16T03:02:14".
"""
data = make_naive(data)
if self.datetime_formatting == 'rfc-2822':
return format_datetime(data)
if self.datetime_formatting == 'iso-8601-strict':
# Remove microseconds to strictly adhere to iso-8601
data = data - datetime.timedelta(microseconds = data.microsecond)
return data.isoformat()
def format_date(self, data):
"""
A hook to control how dates are formatted.
Can be overridden at the ``Serializer`` level (``datetime_formatting``)
or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).
Default is ``iso-8601``, which looks like "2010-12-16".
"""
if self.datetime_formatting == 'rfc-2822':
return format_date(data)
return data.isoformat()
def format_time(self, data):
"""
A hook to control how times are formatted.
Can be overridden at the ``Serializer`` level (``datetime_formatting``)
or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).
Default is ``iso-8601``, which looks like "03:02:14".
"""
if self.datetime_formatting == 'rfc-2822':
return format_time(data)
if self.datetime_formatting == 'iso-8601-strict':
# Remove microseconds to strictly adhere to iso-8601
data = (datetime.datetime.combine(datetime.date(1,1,1),data) - datetime.timedelta(microseconds = data.microsecond)).time()
return data.isoformat()
def serialize(self, bundle, format='application/json', options=None):
"""
Given some data and a format, calls the correct method to serialize
the data and returns the result.
"""
desired_format = None
if options is None:
options = {}
for short_format, long_format in self.content_types.items():
if format == long_format:
if hasattr(self, "to_%s" % short_format):
desired_format = short_format
break
if desired_format is None:
raise UnsupportedFormat("The format indicated '%s' had no available serialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
serialized = getattr(self, "to_%s" % desired_format)(bundle, options)
return serialized
def deserialize(self, content, format='application/json'):
"""
Given some data and a format, calls the correct method to deserialize
the data and returns the result.
"""
desired_format = None
format = format.split(';')[0]
for short_format, long_format in self.content_types.items():
if format == long_format:
if hasattr(self, "from_%s" % short_format):
desired_format = short_format
break
if desired_format is None:
raise UnsupportedFormat("The format indicated '%s' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)
if isinstance(content, six.binary_type):
content = force_text(content)
deserialized = getattr(self, "from_%s" % desired_format)(content)
return deserialized
def to_simple(self, data, options):
"""
For a piece of data, attempts to recognize it and provide a simplified
form of something complex.
This brings complex Python data structures down to native types of the
serialization format(s).
"""
if isinstance(data, (list, tuple)):
return [self.to_simple(item, options) for item in data]
if isinstance(data, dict):
return dict((key, self.to_simple(val, options)) for (key, val) in data.items())
elif isinstance(data, Bundle):
return dict((key, self.to_simple(val, options)) for (key, val) in data.data.items())
elif hasattr(data, 'dehydrated_type'):
if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
if data.full:
return self.to_simple(data.fk_resource, options)
else:
return self.to_simple(data.value, options)
elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
if data.full:
return [self.to_simple(bundle, options) for bundle in data.m2m_bundles]
else:
return [self.to_simple(val, options) for val in data.value]
else:
return self.to_simple(data.value, options)
elif isinstance(data, datetime.datetime):
return self.format_datetime(data)
elif isinstance(data, datetime.date):
return self.format_date(data)
elif isinstance(data, datetime.time):
return self.format_time(data)
elif isinstance(data, bool):
return data
elif isinstance(data, (six.integer_types, float)):
return data
elif data is None:
return None
else:
return force_text(data)
def to_etree(self, data, options=None, name=None, depth=0):
"""
Given some data, converts that data to an ``etree.Element`` suitable
for use in the XML output.
"""
if isinstance(data, (list, tuple)):
element = Element(name or 'objects')
if name:
element = Element(name)
element.set('type', 'list')
else:
element = Element('objects')
for item in data:
element.append(self.to_etree(item, options, depth=depth+1))
element[:] = sorted(element, key=lambda x: x.tag)
elif isinstance(data, dict):
if depth == 0:
element = Element(name or 'response')
else:
element = Element(name or 'object')
element.set('type', 'hash')
for (key, value) in data.items():
element.append(self.to_etree(value, options, name=key, depth=depth+1))
element[:] = sorted(element, key=lambda x: x.tag)
elif isinstance(data, Bundle):
element = Element(name or 'object')
for field_name, field_object in data.data.items():
element.append(self.to_etree(field_object, options, name=field_name, depth=depth+1))
element[:] = sorted(element, key=lambda x: x.tag)
elif hasattr(data, 'dehydrated_type'):
if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
if data.full:
return self.to_etree(data.fk_resource, options, name, depth+1)
else:
return self.to_etree(data.value, options, name, depth+1)
elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
if data.full:
element = Element(name or 'objects')
for bundle in data.m2m_bundles:
element.append(self.to_etree(bundle, options, bundle.resource_name, depth+1))
else:
element = Element(name or 'objects')
for value in data.value:
element.append(self.to_etree(value, options, name, depth=depth+1))
else:
return self.to_etree(data.value, options, name)
else:
element = Element(name or 'value')
simple_data = self.to_simple(data, options)
data_type = get_type_string(simple_data)
if data_type != 'string':
element.set('type', get_type_string(simple_data))
if data_type != 'null':
if isinstance(simple_data, six.text_type):
element.text = simple_data
else:
element.text = force_text(simple_data)
return element
def from_etree(self, data):
"""
Not the smartest deserializer on the planet. At the request level,
it first tries to output the deserialized subelement called "object"
or "objects" and falls back to deserializing based on hinted types in
the XML element attribute "type".
"""
if data.tag == 'request':
# if "object" or "objects" exists, return deserialized forms.
elements = data.getchildren()
for element in elements:
if element.tag in ('object', 'objects'):
return self.from_etree(element)
return dict((element.tag, self.from_etree(element)) for element in elements)
elif data.tag == 'object' or data.get('type') == 'hash':
return dict((element.tag, self.from_etree(element)) for element in data.getchildren())
elif data.tag == 'objects' or data.get('type') == 'list':
return [self.from_etree(element) for element in data.getchildren()]
else:
type_string = data.get('type')
if type_string in ('string', None):
return data.text
elif type_string == 'integer':
return int(data.text)
elif type_string == 'float':
return float(data.text)
elif type_string == 'boolean':
if data.text == 'True':
return True
else:
return False
else:
return None
def to_json(self, data, options=None):
"""
Given some Python data, produces JSON output.
"""
options = options or {}
data = self.to_simple(data, options)
return djangojson.json.dumps(data, cls=djangojson.DjangoJSONEncoder, sort_keys=True, ensure_ascii=False)
def from_json(self, content):
"""
Given some JSON data, returns a Python dictionary of the decoded data.
"""
try:
return json.loads(content)
except ValueError:
raise BadRequest
def to_jsonp(self, data, options=None):
"""
Given some Python data, produces JSON output wrapped in the provided
callback.
Due to a difference between JSON and Javascript, two
newline characters, \u2028 and \u2029, need to be escaped.
See http://timelessrepo.com/json-isnt-a-javascript-subset for
details.
"""
options = options or {}
json = self.to_json(data, options)
json = json.replace(u'\u2028', u'\\u2028').replace(u'\u2029', u'\\u2029')
return u'%s(%s)' % (options['callback'], json)
def to_xml(self, data, options=None):
"""
Given some Python data, produces XML output.
"""
options = options or {}
if lxml is None:
raise ImproperlyConfigured("Usage of the XML aspects requires lxml and defusedxml.")
return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')
def from_xml(self, content, forbid_dtd=True, forbid_entities=True):
"""
Given some XML data, returns a Python dictionary of the decoded data.
By default XML entity declarations and DTDs will raise a BadRequest
        exception, but subclasses may choose to override this if necessary.
necessary.
"""
if lxml is None:
raise ImproperlyConfigured("Usage of the XML aspects requires lxml and defusedxml.")
try:
# Stripping the encoding declaration. Because lxml.
# See http://lxml.de/parsing.html, "Python unicode strings".
content = XML_ENCODING.sub('', content)
parsed = parse_xml(
six.StringIO(content),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities
)
except (LxmlError, DefusedXmlException):
raise BadRequest()
return self.from_etree(parsed.getroot())
def to_yaml(self, data, options=None):
"""
Given some Python data, produces YAML output.
"""
options = options or {}
if yaml is None:
raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")
return yaml.dump(self.to_simple(data, options))
def from_yaml(self, content):
"""
Given some YAML data, returns a Python dictionary of the decoded data.
"""
if yaml is None:
raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")
return yaml.load(content, Loader=TastypieLoader)
def to_plist(self, data, options=None):
"""
Given some Python data, produces binary plist output.
"""
options = options or {}
if biplist is None:
raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")
return biplist.writePlistToString(self.to_simple(data, options))
def from_plist(self, content):
"""
Given some binary plist data, returns a Python dictionary of the decoded data.
"""
if biplist is None:
raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")
if isinstance(content, six.text_type):
content = smart_bytes(content)
return biplist.readPlistFromString(content)
def to_html(self, data, options=None):
"""
Reserved for future usage.
The desire is to provide HTML output of a resource, making an API
available to a browser. This is on the TODO list but not currently
implemented.
"""
options = options or {}
return 'Sorry, not implemented yet. Please append "?format=json" to your URL.'
def from_html(self, content):
"""
Reserved for future usage.
The desire is to handle form-based (maybe Javascript?) input, making an
API available to a browser. This is on the TODO list but not currently
implemented.
"""
pass
def get_type_string(data):
"""
Translates a Python data type into a string format.
"""
data_type = type(data)
if data_type in six.integer_types:
return 'integer'
elif data_type == float:
return 'float'
elif data_type == bool:
return 'boolean'
elif data_type in (list, tuple):
return 'list'
elif data_type == dict:
return 'hash'
elif data is None:
return 'null'
elif isinstance(data, six.string_types):
return 'string'
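# A minimal usage sketch (illustrative, not part of the original module):
# round-trip a plain Python structure through the default Serializer. Calling
# settings.configure() is only done here so the snippet can run outside a
# Django project; the payload values are arbitrary assumptions.
if __name__ == '__main__':
    if not settings.configured:
        settings.configure()
    _serializer = Serializer()
    _payload = {'name': 'example', 'count': 3, 'tags': ['a', 'b']}
    _encoded = _serializer.serialize(_payload, format='application/json')
    assert _serializer.deserialize(_encoded, format='application/json') == _payload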
| bsd-3-clause |
ralsina/pdfrw | examples/poster.py | 6 | 1464 | #!/usr/bin/env python
'''
usage: poster.py my.pdf
Shows how to change the size on a PDF.
Motivation:
My daughter needed to create a 48" x 36" poster, but her Mac version of Powerpoint
only wanted to output 8.5" x 11" for some reason.
'''
import sys
import os
import find_pdfrw
from pdfrw import PdfReader, PdfWriter, PdfDict, PdfName, PdfArray, IndirectPdfDict
from pdfrw.buildxobj import pagexobj
def adjust(page):
page = pagexobj(page)
assert page.BBox == [0, 0, 11 * 72, int(8.5 * 72)], page.BBox
margin = 72 // 2
old_x, old_y = page.BBox[2] - 2 * margin, page.BBox[3] - 2 * margin
new_x, new_y = 48 * 72, 36 * 72
ratio = 1.0 * new_x / old_x
assert ratio == 1.0 * new_y / old_y
index = '/BasePage'
x = -margin * ratio
y = -margin * ratio
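    # The content stream scales the wrapped page XObject by `ratio` and shifts
    # it by (x, y): "cm" concatenates the transformation matrix and "Do" paints
    # the named form XObject, all wrapped in a q/Q graphics-state pair.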
stream = 'q %0.2f 0 0 %0.2f %s %s cm %s Do Q\n' % (ratio, ratio, x, y, index)
xobjdict = PdfDict()
xobjdict[index] = page
return PdfDict(
Type = PdfName.Page,
Contents = PdfDict(stream=stream),
MediaBox = PdfArray([0, 0, new_x, new_y]),
Resources = PdfDict(XObject = xobjdict),
)
def go(inpfn, outfn):
reader = PdfReader(inpfn)
page, = reader.pages
writer = PdfWriter()
writer.addpage(adjust(page))
writer.trailer.Info = IndirectPdfDict(reader.Info)
writer.write(outfn)
if __name__ == '__main__':
inpfn, = sys.argv[1:]
outfn = 'poster.' + os.path.basename(inpfn)
go(inpfn, outfn)
| mit |
arch1tect0r/root | interpreter/llvm/src/tools/clang/bindings/python/examples/cindex/cindex-dump.py | 85 | 2733 | #!/usr/bin/env python
#===- cindex-dump.py - cindex/Python Source Dump -------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
A simple command line tool for dumping a source file using the Clang Index
Library.
"""
def get_diag_info(diag):
return { 'severity' : diag.severity,
'location' : diag.location,
'spelling' : diag.spelling,
'ranges' : diag.ranges,
'fixits' : diag.fixits }
def get_cursor_id(cursor, cursor_list = []):
if not opts.showIDs:
return None
if cursor is None:
return None
# FIXME: This is really slow. It would be nice if the index API exposed
# something that let us hash cursors.
for i,c in enumerate(cursor_list):
if cursor == c:
return i
cursor_list.append(cursor)
return len(cursor_list) - 1
def get_info(node, depth=0):
if opts.maxDepth is not None and depth >= opts.maxDepth:
children = None
else:
children = [get_info(c, depth+1)
for c in node.get_children()]
return { 'id' : get_cursor_id(node),
'kind' : node.kind,
'usr' : node.get_usr(),
'spelling' : node.spelling,
'location' : node.location,
'extent.start' : node.extent.start,
'extent.end' : node.extent.end,
'is_definition' : node.is_definition(),
'definition id' : get_cursor_id(node.get_definition()),
'children' : children }
def main():
from clang.cindex import Index
from pprint import pprint
from optparse import OptionParser, OptionGroup
global opts
parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
parser.add_option("", "--show-ids", dest="showIDs",
help="Compute cursor IDs (very slow)",
action="store_true", default=False)
parser.add_option("", "--max-depth", dest="maxDepth",
help="Limit cursor expansion to depth N",
metavar="N", type=int, default=None)
parser.disable_interspersed_args()
(opts, args) = parser.parse_args()
if len(args) == 0:
        parser.error('invalid number of arguments')
index = Index.create()
tu = index.parse(None, args)
if not tu:
parser.error("unable to load input")
pprint(('diags', map(get_diag_info, tu.diagnostics)))
pprint(('nodes', get_info(tu.cursor)))
if __name__ == '__main__':
main()
| lgpl-2.1 |
npalermo10/auto_choice_assay_train-test | venv/lib/python2.7/site-packages/serial/tools/list_ports.py | 11 | 3135 | #!/usr/bin/env python
#
# Serial port enumeration. Console tool and backend selection.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2011-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
This module will provide a function called comports that returns an
iterable (generator or list) that will enumerate available com ports. Note that
on some systems non-existent ports may be listed.
Additionally a grep function is supplied that can be used to search for ports
based on their descriptions or hardware ID.
"""
import sys
import os
import re
# chose an implementation, depending on os
#~ if sys.platform == 'cli':
#~ else:
if os.name == 'nt': # sys.platform == 'win32':
from serial.tools.list_ports_windows import comports
elif os.name == 'posix':
from serial.tools.list_ports_posix import comports
#~ elif os.name == 'java':
else:
raise ImportError("Sorry: no implementation for your platform ('%s') available" % (os.name,))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def grep(regexp):
"""\
Search for ports using a regular expression. Port name, description and
hardware ID are searched. The function returns an iterable that returns the
same tuples as comport() would do.
"""
r = re.compile(regexp, re.I)
for info in comports():
port, desc, hwid = info
if r.search(port) or r.search(desc) or r.search(hwid):
yield info
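# Example (illustrative): list the ports whose name, description or hardware ID
# mentions "usb". The pattern is an assumption and the result may be empty on
# machines without matching devices.
#
# for port, desc, hwid in sorted(grep('usb')):
#     print("{}: {}".format(port, desc))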
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def main():
import argparse
parser = argparse.ArgumentParser(description='Serial port enumeration')
parser.add_argument(
'regexp',
nargs='?',
help='only show ports that match this regex')
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='show more messages')
parser.add_argument(
'-q', '--quiet',
action='store_true',
help='suppress all messages')
parser.add_argument(
'-n',
type=int,
help='only output the N-th entry')
args = parser.parse_args()
hits = 0
    # get iterator w/ or w/o filter
if args.regexp:
if not args.quiet:
sys.stderr.write("Filtered list with regexp: %r\n" % (args.regexp,))
iterator = sorted(grep(args.regexp))
else:
iterator = sorted(comports())
# list them
for n, (port, desc, hwid) in enumerate(iterator, 1):
if args.n is None or args.n == n:
sys.stdout.write("{:20}\n".format(port))
if args.verbose:
sys.stdout.write(" desc: {}\n".format(desc))
sys.stdout.write(" hwid: {}\n".format(hwid))
hits += 1
if not args.quiet:
if hits:
sys.stderr.write("{} ports found\n".format(hits))
else:
sys.stderr.write("no ports found\n")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
if __name__ == '__main__':
main()
| gpl-3.0 |
daoluan/decode-Django | Django-1.5.1/django/test/simple.py | 29 | 13725 | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
parts = app_module.__name__.split('.')
prefix, last = parts[:-1], parts[-1]
try:
test_module = import_module('.'.join(prefix + [TEST_MODULE]))
except ImportError:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
# app_module either points to a models.py file, or models/__init__.py
# Tests are therefore either in same directory, or one level up
if last == 'models':
app_root = import_module('.'.join(prefix))
else:
app_root = app_module
if not module_has_submodule(app_root, TEST_MODULE):
test_module = None
else:
# The module exists, so there must be an import error in the test
# module itself.
raise
return test_module
def build_suite(app_module):
"""
Create a complete Django test suite for the provided application module.
"""
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
app_module))
try:
suite.addTest(doctest.DocTestSuite(app_module,
checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
test_module))
try:
suite.addTest(doctest.DocTestSuite(
test_module, checker=doctestOutputChecker,
runner=DocTestRunner))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""
Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase "
"or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(
TestClass)
except TypeError:
raise ValueError(
"Test label '%s' does not refer to a test class"
% label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (
module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
"""
Partitions a test suite by test type.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
for test in suite:
if isinstance(test, unittest.TestSuite):
partition_suite(test, classes, bins)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].addTest(test)
break
else:
bins[-1].addTest(test)
def reorder_suite(suite, classes):
"""
Reorders a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
"""
class_count = len(classes)
bins = [unittest.TestSuite() for i in range(class_count+1)]
partition_suite(suite, classes, bins)
for i in range(class_count):
bins[0].addTests(bins[i+1])
return bins[0]
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST_DEPENDENCIES.
"""
ordered_test_databases = []
resolved_databases = set()
    # Maps db signature to dependencies of all its aliases
dependencies_map = {}
    # sanity check - no DB can depend on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
        # Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured(
"Circular dependency in TEST_DEPENDENCIES")
test_databases = deferred
return ordered_test_databases
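# Illustrative example (the aliases are assumptions): with DATABASES entries
# such as
#
#   'default': {...}
#   'warehouse': {..., 'TEST_DEPENDENCIES': ['default']}
#
# dependency_ordered() yields the 'default' signature before 'warehouse', so
# its test database is created first.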
class DjangoTestSuiteRunner(object):
def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
def setup_test_environment(self, **kwargs):
setup_test_environment()
settings.DEBUG = False
unittest.installHandler()
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return reorder_suite(suite, (unittest.TestCase,))
def setup_databases(self, **kwargs):
from django.db import connections, DEFAULT_DB_ALIAS
# First pass -- work out which databases actually need to be created,
# and which ones are test mirrors or duplicate entries in DATABASES
mirrored_aliases = {}
test_databases = {}
dependencies = {}
for alias in connections:
connection = connections[alias]
if connection.settings_dict['TEST_MIRROR']:
# If the database is marked as a test mirror, save
# the alias.
mirrored_aliases[alias] = (
connection.settings_dict['TEST_MIRROR'])
else:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'TEST_DEPENDENCIES' in connection.settings_dict:
dependencies[alias] = (
connection.settings_dict['TEST_DEPENDENCIES'])
else:
if alias != DEFAULT_DB_ALIAS:
dependencies[alias] = connection.settings_dict.get(
'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])
# Second pass -- actually create the databases.
old_names = []
mirrors = []
for signature, (db_name, aliases) in dependency_ordered(
test_databases.items(), dependencies):
test_db_name = None
# Actually create the database for the first connection
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, True))
if test_db_name is None:
test_db_name = connection.creation.create_test_db(
self.verbosity, autoclobber=not self.interactive)
else:
connection.settings_dict['NAME'] = test_db_name
for alias, mirror_alias in mirrored_aliases.items():
mirrors.append((alias, connections[alias].settings_dict['NAME']))
connections[alias].settings_dict['NAME'] = (
connections[mirror_alias].settings_dict['NAME'])
return old_names, mirrors
def run_suite(self, suite, **kwargs):
return unittest.TextTestRunner(
verbosity=self.verbosity, failfast=self.failfast).run(suite)
def teardown_databases(self, old_config, **kwargs):
"""
Destroys all the non-mirror databases.
"""
old_names, mirrors = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, self.verbosity)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
old_config = self.setup_databases()
result = self.run_suite(suite)
self.teardown_databases(old_config)
self.teardown_test_environment()
return self.suite_result(suite, result)
| gpl-2.0 |
qimmer/mta13341 | libfreenect-master/wrappers/python/demo_mp_async.py | 6 | 1033 | #!/usr/bin/env python
import freenect
import matplotlib.pyplot as mp
import signal
import frame_convert
mp.ion()
image_rgb = None
image_depth = None
keep_running = True
def display_depth(dev, data, timestamp):
global image_depth
data = frame_convert.pretty_depth(data)
mp.gray()
mp.figure(1)
if image_depth:
image_depth.set_data(data)
else:
image_depth = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def display_rgb(dev, data, timestamp):
global image_rgb
mp.figure(2)
if image_rgb:
image_rgb.set_data(data)
else:
image_rgb = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def body(*args):
if not keep_running:
raise freenect.Kill
def handler(signum, frame):
global keep_running
keep_running = False
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)
| mit |
rbprogrammer/advanced_python_topics | course-material/py3/solutions/06 Metaprogramming/Shapes/Solution.e/shapetst.py | 1 | 2071 | #!/usr/local/bin/python3
"""
Test harness to exercise the shape hierarchy
"""
from shape import *
def test_b():
print("\nTest b")
# Create derived-class object.
# Circle constructor arguments are: (centreX, centreY, radius)
pshape = Circle(10,10,10)
# The expected output is as follows:
# coordinates (50,100,70,120)
# area 314
pshape.translateX(50)
pshape.translateY(100)
pshape.display_coords()
print("Area = %d" % pshape.area())
def test_c():
print("\nTest c")
shptable = [ Circle(10,10,10),
Parallelogram(0,0,100,100,50) ]
# The expected output is as follows:
# circle: coords (10,10,30,30), area 314
# parallelogram: coords (10,10,110,110), area 5000
for shp in shptable:
shp.translateX(10)
shp.translateY(10)
shp.display_coords()
print("Area = %d" % shp.area())
print("\nDestroying the shapes")
def test_d():
print("\nTest d")
shptable = [ Circle(10,10,10),
Parallelogram(0,0,100,100,50),
Rectangle(50,50,150,150) ]
# The expected output is as follows:
# circle: coords (10,10,30,30), area 314
# parallelogram: coords (10,10,110,110), area 5000
# rectangle: coords (60,60,160,160), area 10000
for shp in shptable:
shp.translateX(10)
shp.translateY(10)
shp.display_coords()
print("Area = %d" % shp.area())
print("\nDestroying the shapes")
def test_e():
print("\nTest e")
shptable = [ Circle(75,75,25),
Parallelogram(0,0,120,30,40),
Rectangle(200,200,300,300)]
# The expected perimeter values are:
# 157 for the circle
# 260 for the parallelogram
# 400 for the rectangle
for shp in shptable:
print("Perimeter = %d" % shp.perimeter())
print("\nDestroying the shapes")
if __name__ == "__main__":
test_b()
test_c()
test_d()
test_e()
pass
| apache-2.0 |
amoose136/maratus | detectevent.py | 1 | 3686 | #!/usr/bin/env python
from __future__ import print_function
"""PyAudio Example: Play a wave file."""
import pyaudio
import wave
import sys
from pdb import set_trace as br
import numpy as np
from collections import deque
import cv2
import time
def record_audio():
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 25
WAVE_OUTPUT_FILENAME = "apt.wav"
class meta_state:
def __init__(self,history_length,target,tol):
self.cap = cv2.VideoCapture(1)
self.cap.set(cv2.CAP_PROP_CONTRAST, 5)
self.cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, -1)
self.cap.set(cv2.CAP_PROP_EXPOSURE,-50)
self.cap.set(cv2.CAP_PROP_BRIGHTNESS, 20)
self.num=0
self.state_list=deque([])
self.target_frequency=target
self.len=history_length
self.tolerance=tol
for _ in range(0,history_length):
self.state_list.append(False)
def push(self,freq):
if 730 <= freq <= 760:
self.state_list.append(True)
else:
self.state_list.append(False)
self.state_list.popleft()
if self.state_list[-1]==False and self.state_list[-1]!=self.state_list[-2]:
for i in range(1,len(self.state_list)):
if self.state_list[-i-1]==self.state_list[-1]:
break
else:
#if the first element is not equal to any of the other elements
# in the list and the first element is false (not lifting), we
# have just transitioned from lifting to not lifting. Take a picture.
# State must be: [false,true,true,true,true,...]
if i==len(self.state_list)-1:
time.sleep(.15)
print("Taking a picture")
ret, frame = self.cap.read()
if ret:
cv2.imwrite("test_"+str(self.num)+".jpg",frame)
self.num+=1
del ret,frame
ms=meta_state(10,746,100)
p = pyaudio.PyAudio()
SWIDTH=p.get_sample_size(FORMAT)
WINDOW = np.blackman(CHUNK)
#For live streaming:
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK,
input_device_index = 0)
print("Listening like a spider on in a web...")
## For streaming from wav file:
# f = wave.open('lift.wav')
# FORMAT=p.get_format_from_width(f.getsampwidth())
# CHANNELS=f.getnchannels()
# RATE=f.getframerate()
# print("FORMAT: "+str(FORMAT)+"\nCHANNELS: "+str(CHANNELS)+"\nRATE: "+str(RATE))
# stream = p.open(format=FORMAT,
# channels=CHANNELS,
# rate=RATE,
# output=True)
# data=f.readframes(CHUNK)
# print("Processing file...")
thefreq=0
frames = []
# for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
# while data != '':
while True:
data = stream.read(CHUNK,exception_on_overflow = False)
# data=f.readframes(CHUNK)
# stream.write(data)
indata = np.array(wave.struct.unpack("%dh"%(len(data)/SWIDTH),data))*WINDOW
# Take the fft and square each value
fftData=abs(np.fft.rfft(indata)**2)
# find the maximum
maxi = fftData[1:].argmax() + 1
# use quadratic interpolation around the max
if maxi != len(fftData)-1:
y0,y1,y2 = np.log(fftData[maxi-1:maxi+2:])
x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
# find the frequency and output it
thefreq = (maxi+x1)*RATE/CHUNK
# print("The freq is %f Hz." % (thefreq))
else:
thefreq = maxi*RATE/CHUNK
# print("The freq is %f Hz." % (thefreq))
ms.push(thefreq)
# print(ms.state_list)
# frames.append(data)
print("* done\n")
stream.stop_stream()
stream.close()
p.terminate()
#uncomment below and frames.append above to save audio
# wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
# wf.setnchannels(CHANNELS)
# wf.setsampwidth(p.get_sample_size(FORMAT))
# wf.setframerate(RATE)
# wf.writeframes(b''.join(frames))
# wf.close()
record_audio()
| apache-2.0 |
getredash/redash | redash/authentication/ldap_auth.py | 3 | 2936 | import logging
import sys
from redash import settings
from flask import flash, redirect, render_template, request, url_for, Blueprint
from flask_login import current_user
try:
from ldap3 import Server, Connection
except ImportError:
if settings.LDAP_LOGIN_ENABLED:
sys.exit(
"The ldap3 library was not found. This is required to use LDAP authentication (see requirements.txt)."
)
from redash.authentication import (
create_and_login_user,
logout_and_redirect_to_index,
get_next_path,
)
from redash.authentication.org_resolving import current_org
from redash.handlers.base import org_scoped_rule
logger = logging.getLogger("ldap_auth")
blueprint = Blueprint("ldap_auth", __name__)
@blueprint.route(org_scoped_rule("/ldap/login"), methods=["GET", "POST"])
def login(org_slug=None):
index_url = url_for("redash.index", org_slug=org_slug)
unsafe_next_path = request.args.get("next", index_url)
next_path = get_next_path(unsafe_next_path)
if not settings.LDAP_LOGIN_ENABLED:
logger.error("Cannot use LDAP for login without being enabled in settings")
return redirect(url_for("redash.index", next=next_path))
if current_user.is_authenticated:
return redirect(next_path)
if request.method == "POST":
ldap_user = auth_ldap_user(request.form["email"], request.form["password"])
if ldap_user is not None:
user = create_and_login_user(
current_org,
ldap_user[settings.LDAP_DISPLAY_NAME_KEY][0],
ldap_user[settings.LDAP_EMAIL_KEY][0],
)
if user is None:
return logout_and_redirect_to_index()
return redirect(next_path or url_for("redash.index"))
else:
flash("Incorrect credentials.")
return render_template(
"login.html",
org_slug=org_slug,
next=next_path,
email=request.form.get("email", ""),
show_password_login=True,
username_prompt=settings.LDAP_CUSTOM_USERNAME_PROMPT,
hide_forgot_password=True,
)
def auth_ldap_user(username, password):
server = Server(settings.LDAP_HOST_URL, use_ssl=settings.LDAP_SSL)
if settings.LDAP_BIND_DN is not None:
conn = Connection(
server,
settings.LDAP_BIND_DN,
password=settings.LDAP_BIND_DN_PASSWORD,
authentication=settings.LDAP_AUTH_METHOD,
auto_bind=True,
)
else:
conn = Connection(server, auto_bind=True)
conn.search(
settings.LDAP_SEARCH_DN,
settings.LDAP_SEARCH_TEMPLATE % {"username": username},
attributes=[settings.LDAP_DISPLAY_NAME_KEY, settings.LDAP_EMAIL_KEY],
)
if len(conn.entries) == 0:
return None
user = conn.entries[0]
if not conn.rebind(user=user.entry_dn, password=password):
return None
return user
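# Illustrative configuration sketch (the values are assumptions; the setting
# names are the ones read above):
#
#   LDAP_HOST_URL = 'ldap://ldap.example.com:389'
#   LDAP_SEARCH_DN = 'ou=users,dc=example,dc=com'
#   LDAP_SEARCH_TEMPLATE = '(cn=%(username)s)'
#   LDAP_DISPLAY_NAME_KEY = 'displayName'
#   LDAP_EMAIL_KEY = 'mail'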
| bsd-2-clause |
philippjfr/bokeh | setup.py | 2 | 5418 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Setup script for Bokeh.
Two separate components comprise Bokeh:
* A JavaScript runtime BokehJS that draws and handles events in browsers
* Python "bindings" and an optional server for interacting with BokehJS
The BokehJS library is written in a mixture of CoffeeScript, TypeScript, and
pure JavaScript. This necessitates a "compilation" step to build a complete
BokehJS from these sources, and this fact makes the Bokeh setup and install
more complicated than typical pure Python projects.
In order to build BokehJS, the first step is to make sure that the "npm"
command is installed. If you are using conda, you can typically just run
conda install -c bokeh nodejs
Otherwise, you can find general instructions for installing NodeJS here:
https://nodejs.org/en/download/
Once you have "npm" installed, this script can be used to build BokehJS
from the ``bokehjs`` source subdirectory, and install Bokeh into the python
source package by issuing the command:
python setup.py install --build-js
The script also supports the standard "develop" mode that setuptools offers:
python setup.py develop --build-js
It can take a few minutes for BokehJS to build. If you are not making changes
to the BokehJS source code, then you only need to build it once, the first
time. Subsequent invocations can be made to install the previously built
BokehJS from the ``bokehjs`` source subdirectory with the ``--install-js``
option, e.g.:
python setup.py develop --install-js
It is also possible to build BokehJS "by hand" under the ``bokehjs`` source
subdirectory. In this case, to simply install the built BokehJS quickly into
the python source tree, the following command may be issued:
python setup.py --install-js
This will copy BokehJS from the ``bokehjs`` source directory, into the python
package directory, and perform no other actions.
Note that source distributions (sdists) are published with a pre-built BokehJS
included inside the python package, and do not include the ``bokehjs`` source.
The ``--build-js`` and ``--install-js`` options are not valid when running from
an sdist. They will be ignored, and a warning printed.
'''
from os.path import join
from shutil import copy
import sys
from setuptools import find_packages, setup
from _setup_support import (
build_or_install_bokehjs, conda_rendering, fixup_building_sdist,
fixup_for_packaged, get_cmdclass, get_package_data, get_version,
install_js, package_files, package_path, ROOT, SERVER, show_bokehjs,
show_help
)
# immediately bail for ancient pythons
if sys.version_info[:2] < (2, 7):
raise RuntimeError("Bokeh requires python >= 2.7")
# immediately handle lightweight "python setup.py --install-js"
if len(sys.argv) == 2 and sys.argv[-1] == '--install-js':
install_js()
sys.exit()
# we want to have the license at the top level of the GitHub repo, but setup
# can't include it from there, so copy it to the package directory first thing
copy("LICENSE.txt", "bokeh/")
# state our runtime deps here, also used by meta.yaml (so KEEP the spaces)
REQUIRES = [
'six >=1.5.2',
'PyYAML >=3.10',
'python-dateutil >=2.1',
'Jinja2 >=2.7',
'numpy >=1.7.1',
'tornado >=4.3',
]
# handle the compat difference for futures (meta.yaml handles differently)
if sys.version_info[:2] == (2, 7):
REQUIRES.append('futures >=3.0.3')
# if this is just conda-build skimming information, skip all this actual work
if not conda_rendering():
fixup_for_packaged() # --build_js and --install_js not valid FROM sdist
fixup_building_sdist() # must build BokehJS when MAKING sdists
bokehjs_action = build_or_install_bokehjs()
# configuration to include all the special or non-python files in the package
# directory that need to also be installed or included in a build
package_path(join(SERVER, 'static'))
package_path(join(ROOT, 'bokeh', 'core', '_templates'))
package_path(join(ROOT, 'bokeh', 'sphinxext', '_templates'))
package_path(join(ROOT, 'bokeh', 'server', 'views'), ('.html'))
package_path(join(ROOT, 'bokeh', 'sampledata', '_data'))
package_files('LICENSE.txt', 'themes/*.yaml')
setup(
# basic package metadata
name='bokeh',
version=get_version(),
description='Interactive plots and applications in the browser from Python',
license='New BSD',
author='Anaconda',
author_email='[email protected]',
url='http://github.com/bokeh/bokeh',
classifiers=open("classifiers.txt").read().strip().split('\n'),
# details needed by setup
install_requires=REQUIRES,
packages=find_packages(exclude=["scripts*", "tests*"]),
package_data=get_package_data(),
entry_points={'console_scripts': ['bokeh = bokeh.__main__:main',], },
zip_safe=False,
cmdclass=get_cmdclass()
)
# if this is just conda-build skimming information, skip all this actual work
if not conda_rendering():
if '--help' in sys.argv: show_help(bokehjs_action)
if 'develop' in sys.argv: show_bokehjs(bokehjs_action, develop=True)
if 'install' in sys.argv: show_bokehjs(bokehjs_action)
| bsd-3-clause |
anugrah-saxena/pycroscopy | pycroscopy/analysis/utils/tree.py | 1 | 6992 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 31 17:03:29 2016
@author: Suhas Somnath
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
# TODO: Test and debug node and clusterTree classes for agglomerative clustering etc
class Node(object):
"""
Basic unit of a tree - a node. Keeps track of its value, labels, parent, children, level in the tree etc.
"""
def __init__(self, name, value=None, parent=None, dist=0, labels=None, children=[], compute_mean=False,
verbose=False):
"""
Parameters
----------
name : (Optional) unsigned int
ID of this node
value : (Optional) 1D numpy array
Response corresponding to this Node.
parent : (Optional) unsigned int or Node object
Parent for this Node.
dist : (Optional) float
Distance between the children nodes
labels : (Optional) list or 1D numpy array of unsigned integers
Positions / instances in a main dataset within this cluster
children : (Optional) list of Node objects
Children for this node
compute_mean : (Optional) Boolean
Whether or not to compute the value attribute from the provided children
"""
self.children = children
self.parent = parent
self.name = name
self.value = value
self.dist = dist
self.level = 0
# Assign this node as the parent for all its children
for child in children:
child.parent = self
# If labels were not provided (tree node), get from children
if labels is None:
temp_labels = []
for child in self.children:
if verbose:
print('Child #{} had the following labels:'.format(child.name))
print(child.labels)
temp_labels.append(np.array(child.labels))
if verbose:
print('Labels (unsorted) derived from children for node #{}:'.format(name))
print(temp_labels)
self.labels = np.hstack(temp_labels)
self.labels.sort()
else:
if verbose:
print('Labels for leaf node #{}:'.format(name))
print(labels)
self.labels = np.array(labels, dtype=np.uint32)
# Compute the level for this node along with the number of children below it
if len(self.children) > 0:
self.num_nodes = 0
for child in self.children:
self.num_nodes += child.num_nodes
self.level = max(self.level, child.level)
self.level += 1 # because this node has to be one higher level than its highest children
else:
self.num_nodes = 1
if verbose:
print('Parent node:', str(name), 'has', str(self.num_nodes), 'children')
if all([len(self.children) > 0, type(value) == type(None), compute_mean]):
resp = []
for child in children:
if verbose:
print(' Child node', str(child.name), 'has', str(child.num_nodes), 'children')
# primitive method of equal bias mean: resp.append(child.value)
# weighted mean:
resp.append(child.value * child.labels.size / self.labels.size)
# self.value = np.mean(np.array(resp), axis=0)
self.value = np.sum(np.array(resp), axis=0)
def __str__(self):
return '({}) --> {},{}'.format(self.name, str(self.children[0].name), str(self.children[1].name))
class ClusterTree(object):
"""
Creates a tree representation from the provided linkage pairing. Useful for clustering
"""
def __init__(self, linkage_pairing, labels, distances=None, centroids=None):
"""
Parameters
----------
linkage_pairing : 2D unsigned int numpy array or list
Linkage pairing that describes a tree structure. The matrix should result in a single tree apex.
labels : 1D unsigned int numpy array or list
Labels assigned to each of the positions in the main dataset. Eg. Labels from clustering
distances : (Optional) 1D numpy float array or list
Distances between clusters
centroids : (Optional) 2D numpy array
Mean responses for each of the clusters. These will be propagated up
"""
self.num_leaves = linkage_pairing.shape[0] + 1
self.linkage = linkage_pairing
self.centroids = centroids
""" this list maintains pointers to the nodes pertaining to that cluster id for quick look-ups
By default this lookup table just contains the number indices of these clusters.
They will be replaced with node objects as and when the objects are created"""
self.nodes = list()
        # now labels is a giant list holding the cluster label assigned to each of the positions.
self.labels = np.array(labels, dtype=np.uint32)
""" the labels for the leaf nodes need to be calculated manually from the provided labels
Populate the lowest level nodes / leaves first:"""
for clust_id in range(self.num_leaves):
which_pos = np.where(self.labels == clust_id)
if centroids is not None:
self.nodes.append(Node(clust_id, value=centroids[clust_id], labels=which_pos))
else:
self.nodes.append(Node(clust_id, labels=which_pos))
for row in range(linkage_pairing.shape[0]):
"""print 'working on', linkage_pairing[row]
we already have each of these children in our look-up table"""
childs = [] # this is an empty list that will hold all the children corresponding to this node
for col in range(linkage_pairing.shape[1]):
""" look at each child in this row
look up the node object corresponding to this label """
childs.append(self.nodes[int(linkage_pairing[row, col])])
# Now this row results in a new node. That is what we create here and assign the children to this node
new_node = Node(row + self.num_leaves, children=childs, compute_mean=centroids is not None)
# If distances are provided, add the distances attribute to this node.
# This is the distance between the children
if distances is not None:
new_node.dist = distances[row]
# add this node to the look-up table:
self.nodes.append(new_node)
self.tree = self.nodes[-1]
def __str__(self):
"""
Overrides the to string representation. Prints the names of the node and its children.
Not very useful for large trees
Returns
--------
String representation of the tree structure
"""
return str(self.tree)
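# A minimal sketch (illustrative, not part of the original module): building a
# ClusterTree from cluster centroids with SciPy. The toy centroids and labels
# are assumptions; only the first two linkage columns (the pairing) are passed.
if __name__ == '__main__':
    from scipy.cluster.hierarchy import linkage
    _centroids = np.random.rand(4, 8)    # mean response of 4 clusters
    _labels = np.tile(np.arange(4), 12)  # cluster id for 48 positions
    _link = linkage(_centroids, method='average')
    _tree = ClusterTree(_link[:, :2], _labels,
                        distances=_link[:, 2], centroids=_centroids)
    print(_tree)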
| mit |
washort/zamboni | mkt/access/migrations/0001_initial.py | 13 | 1814 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(default=b'', max_length=255)),
('rules', models.TextField()),
('notes', models.TextField(blank=True)),
],
options={
'db_table': 'groups',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GroupUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('group', models.ForeignKey(to='access.Group')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'groups_users',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='groupuser',
unique_together=set([('group', 'user')]),
),
migrations.AddField(
model_name='group',
name='users',
field=models.ManyToManyField(related_name='groups', through='access.GroupUser', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| bsd-3-clause |
beagles/neutron_hacking | neutron/tests/unit/test_extension_firewall.py | 11 | 20691 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
import copy
import mock
from webob import exc
import webtest
from neutron.extensions import firewall
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_api_v2_extension
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class FirewallExtensionTestCase(test_api_v2_extension.ExtensionTestCase):
fmt = 'json'
def setUp(self):
super(FirewallExtensionTestCase, self).setUp()
plural_mappings = {'firewall_policy': 'firewall_policies'}
self._setUpExtension(
'neutron.extensions.firewall.FirewallPluginBase',
constants.FIREWALL, firewall.RESOURCE_ATTRIBUTE_MAP,
firewall.Firewall, 'fw', plural_mappings=plural_mappings)
def test_create_firewall(self):
fw_id = _uuid()
data = {'firewall': {'description': 'descr_firewall1',
'name': 'firewall1',
'admin_state_up': True,
'firewall_policy_id': _uuid(),
'shared': False,
'tenant_id': _uuid()}}
return_value = copy.copy(data['firewall'])
return_value.update({'id': fw_id})
# since 'shared' is hidden
del return_value['shared']
instance = self.plugin.return_value
instance.create_firewall.return_value = return_value
res = self.api.post(_get_path('fw/firewalls', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_firewall.assert_called_with(mock.ANY,
firewall=data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('firewall', res)
self.assertEqual(res['firewall'], return_value)
def test_firewall_list(self):
fw_id = _uuid()
return_value = [{'tenant_id': _uuid(),
'id': fw_id}]
instance = self.plugin.return_value
instance.get_firewalls.return_value = return_value
res = self.api.get(_get_path('fw/firewalls', fmt=self.fmt))
instance.get_firewalls.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_firewall_get(self):
fw_id = _uuid()
return_value = {'tenant_id': _uuid(),
'id': fw_id}
instance = self.plugin.return_value
instance.get_firewall.return_value = return_value
res = self.api.get(_get_path('fw/firewalls',
id=fw_id, fmt=self.fmt))
instance.get_firewall.assert_called_with(mock.ANY,
fw_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall', res)
self.assertEqual(res['firewall'], return_value)
def test_firewall_update(self):
fw_id = _uuid()
update_data = {'firewall': {'name': 'new_name'}}
return_value = {'tenant_id': _uuid(),
'id': fw_id}
instance = self.plugin.return_value
instance.update_firewall.return_value = return_value
res = self.api.put(_get_path('fw/firewalls', id=fw_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_firewall.assert_called_with(mock.ANY, fw_id,
firewall=update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall', res)
self.assertEqual(res['firewall'], return_value)
def test_firewall_delete(self):
self._test_entity_delete('firewall')
def _test_create_firewall_rule(self, src_port, dst_port):
rule_id = _uuid()
data = {'firewall_rule': {'description': 'descr_firewall_rule1',
'name': 'rule1',
'shared': False,
'protocol': 'tcp',
'ip_version': 4,
'source_ip_address': '192.168.0.1',
'destination_ip_address': '127.0.0.1',
'source_port': src_port,
'destination_port': dst_port,
'action': 'allow',
'enabled': True,
'tenant_id': _uuid()}}
expected_ret_val = copy.copy(data['firewall_rule'])
expected_ret_val['source_port'] = str(src_port)
expected_ret_val['destination_port'] = str(dst_port)
expected_call_args = copy.copy(expected_ret_val)
expected_ret_val['id'] = rule_id
instance = self.plugin.return_value
instance.create_firewall_rule.return_value = expected_ret_val
res = self.api.post(_get_path('fw/firewall_rules', fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_firewall_rule.assert_called_with(mock.ANY,
firewall_rule=
{'firewall_rule':
expected_call_args})
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('firewall_rule', res)
self.assertEqual(res['firewall_rule'], expected_ret_val)
def test_create_firewall_rule_with_integer_ports(self):
self._test_create_firewall_rule(1, 10)
def test_create_firewall_rule_with_string_ports(self):
self._test_create_firewall_rule('1', '10')
def test_create_firewall_rule_with_port_range(self):
self._test_create_firewall_rule('1:20', '30:40')
def test_firewall_rule_list(self):
rule_id = _uuid()
return_value = [{'tenant_id': _uuid(),
'id': rule_id}]
instance = self.plugin.return_value
instance.get_firewall_rules.return_value = return_value
res = self.api.get(_get_path('fw/firewall_rules', fmt=self.fmt))
instance.get_firewall_rules.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_firewall_rule_get(self):
rule_id = _uuid()
return_value = {'tenant_id': _uuid(),
'id': rule_id}
instance = self.plugin.return_value
instance.get_firewall_rule.return_value = return_value
res = self.api.get(_get_path('fw/firewall_rules',
id=rule_id, fmt=self.fmt))
instance.get_firewall_rule.assert_called_with(mock.ANY,
rule_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_rule', res)
self.assertEqual(res['firewall_rule'], return_value)
def test_firewall_rule_update(self):
rule_id = _uuid()
update_data = {'firewall_rule': {'action': 'deny'}}
return_value = {'tenant_id': _uuid(),
'id': rule_id}
instance = self.plugin.return_value
instance.update_firewall_rule.return_value = return_value
res = self.api.put(_get_path('fw/firewall_rules', id=rule_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_firewall_rule.assert_called_with(mock.ANY,
rule_id,
firewall_rule=
update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_rule', res)
self.assertEqual(res['firewall_rule'], return_value)
def test_firewall_rule_delete(self):
self._test_entity_delete('firewall_rule')
def test_create_firewall_policy(self):
policy_id = _uuid()
data = {'firewall_policy': {'description': 'descr_firewall_policy1',
'name': 'new_fw_policy1',
'shared': False,
'firewall_rules': [_uuid(), _uuid()],
'audited': False,
'tenant_id': _uuid()}}
return_value = copy.copy(data['firewall_policy'])
return_value.update({'id': policy_id})
instance = self.plugin.return_value
instance.create_firewall_policy.return_value = return_value
res = self.api.post(_get_path('fw/firewall_policies',
fmt=self.fmt),
self.serialize(data),
content_type='application/%s' % self.fmt)
instance.create_firewall_policy.assert_called_with(mock.ANY,
firewall_policy=
data)
self.assertEqual(res.status_int, exc.HTTPCreated.code)
res = self.deserialize(res)
self.assertIn('firewall_policy', res)
self.assertEqual(res['firewall_policy'], return_value)
def test_firewall_policy_list(self):
policy_id = _uuid()
return_value = [{'tenant_id': _uuid(),
'id': policy_id}]
instance = self.plugin.return_value
instance.get_firewall_policies.return_value = return_value
res = self.api.get(_get_path('fw/firewall_policies',
fmt=self.fmt))
instance.get_firewall_policies.assert_called_with(mock.ANY,
fields=mock.ANY,
filters=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
def test_firewall_policy_get(self):
policy_id = _uuid()
return_value = {'tenant_id': _uuid(),
'id': policy_id}
instance = self.plugin.return_value
instance.get_firewall_policy.return_value = return_value
res = self.api.get(_get_path('fw/firewall_policies',
id=policy_id, fmt=self.fmt))
instance.get_firewall_policy.assert_called_with(mock.ANY,
policy_id,
fields=mock.ANY)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_policy', res)
self.assertEqual(res['firewall_policy'], return_value)
def test_firewall_policy_update(self):
policy_id = _uuid()
update_data = {'firewall_policy': {'audited': True}}
return_value = {'tenant_id': _uuid(),
'id': policy_id}
instance = self.plugin.return_value
instance.update_firewall_policy.return_value = return_value
res = self.api.put(_get_path('fw/firewall_policies',
id=policy_id,
fmt=self.fmt),
self.serialize(update_data))
instance.update_firewall_policy.assert_called_with(mock.ANY,
policy_id,
firewall_policy=
update_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertIn('firewall_policy', res)
self.assertEqual(res['firewall_policy'], return_value)
def test_firewall_policy_update_malformed_rules(self):
# emulating client request when no rule uuids are provided for
# --firewall_rules parameter
update_data = {'firewall_policy': {'firewall_rules': True}}
# have to check for generic AppError
self.assertRaises(
webtest.AppError,
self.api.put,
_get_path('fw/firewall_policies', id=_uuid(), fmt=self.fmt),
self.serialize(update_data))
def test_firewall_policy_delete(self):
self._test_entity_delete('firewall_policy')
def test_firewall_policy_insert_rule(self):
firewall_policy_id = _uuid()
firewall_rule_id = _uuid()
ref_firewall_rule_id = _uuid()
insert_data = {'firewall_rule_id': firewall_rule_id,
'insert_before': ref_firewall_rule_id,
'insert_after': None}
return_value = {'firewall_policy':
{'tenant_id': _uuid(),
'id': firewall_policy_id,
'firewall_rules': [ref_firewall_rule_id,
firewall_rule_id]}}
instance = self.plugin.return_value
instance.insert_rule.return_value = return_value
path = _get_path('fw/firewall_policies', id=firewall_policy_id,
action="insert_rule",
fmt=self.fmt)
res = self.api.put(path, self.serialize(insert_data))
instance.insert_rule.assert_called_with(mock.ANY, firewall_policy_id,
insert_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertEqual(res, return_value)
def test_firewall_policy_remove_rule(self):
firewall_policy_id = _uuid()
firewall_rule_id = _uuid()
remove_data = {'firewall_rule_id': firewall_rule_id}
return_value = {'firewall_policy':
{'tenant_id': _uuid(),
'id': firewall_policy_id,
'firewall_rules': []}}
instance = self.plugin.return_value
instance.remove_rule.return_value = return_value
path = _get_path('fw/firewall_policies', id=firewall_policy_id,
action="remove_rule",
fmt=self.fmt)
res = self.api.put(path, self.serialize(remove_data))
instance.remove_rule.assert_called_with(mock.ANY, firewall_policy_id,
remove_data)
self.assertEqual(res.status_int, exc.HTTPOk.code)
res = self.deserialize(res)
self.assertEqual(res, return_value)
class FirewallExtensionTestCaseXML(FirewallExtensionTestCase):
fmt = 'xml'
class TestFirewallAttributeValidators(base.BaseTestCase):
def test_validate_port_range(self):
msg = firewall._validate_port_range(None)
self.assertIsNone(msg)
msg = firewall._validate_port_range('10')
self.assertIsNone(msg)
msg = firewall._validate_port_range(10)
self.assertIsNone(msg)
msg = firewall._validate_port_range(-1)
self.assertEqual(msg, "Invalid port '-1'")
msg = firewall._validate_port_range('66000')
self.assertEqual(msg, "Invalid port '66000'")
msg = firewall._validate_port_range('10:20')
self.assertIsNone(msg)
msg = firewall._validate_port_range('1:65535')
self.assertIsNone(msg)
msg = firewall._validate_port_range('0:65535')
self.assertEqual(msg, "Invalid port '0'")
msg = firewall._validate_port_range('1:65536')
self.assertEqual(msg, "Invalid port '65536'")
msg = firewall._validate_port_range('abc:efg')
self.assertEqual(msg, "Port 'abc' is not a valid number")
msg = firewall._validate_port_range('1:efg')
self.assertEqual(msg, "Port 'efg' is not a valid number")
msg = firewall._validate_port_range('-1:10')
self.assertEqual(msg, "Invalid port '-1'")
msg = firewall._validate_port_range('66000:10')
self.assertEqual(msg, "Invalid port '66000'")
msg = firewall._validate_port_range('10:66000')
self.assertEqual(msg, "Invalid port '66000'")
msg = firewall._validate_port_range('1:-10')
self.assertEqual(msg, "Invalid port '-10'")
def test_validate_ip_or_subnet_or_none(self):
msg = firewall._validate_ip_or_subnet_or_none(None)
self.assertIsNone(msg)
msg = firewall._validate_ip_or_subnet_or_none('1.1.1.1')
self.assertIsNone(msg)
msg = firewall._validate_ip_or_subnet_or_none('1.1.1.0/24')
self.assertIsNone(msg)
ip_addr = '1111.1.1.1'
msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
self.assertEqual(msg, ("'%s' is not a valid IP address and "
"'%s' is not a valid IP subnet") % (ip_addr,
ip_addr))
ip_addr = '1.1.1.1 has whitespace'
msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
self.assertEqual(msg, ("'%s' is not a valid IP address and "
"'%s' is not a valid IP subnet") % (ip_addr,
ip_addr))
ip_addr = '111.1.1.1\twhitespace'
msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
self.assertEqual(msg, ("'%s' is not a valid IP address and "
"'%s' is not a valid IP subnet") % (ip_addr,
ip_addr))
ip_addr = '111.1.1.1\nwhitespace'
msg = firewall._validate_ip_or_subnet_or_none(ip_addr)
self.assertEqual(msg, ("'%s' is not a valid IP address and "
"'%s' is not a valid IP subnet") % (ip_addr,
ip_addr))
# Valid - IPv4
cidr = "10.0.2.0/24"
msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
self.assertIsNone(msg)
# Valid - IPv6 without final octets
cidr = "fe80::/24"
msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
self.assertIsNone(msg)
# Valid - IPv6 with final octets
cidr = "fe80::0/24"
msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
self.assertIsNone(msg)
cidr = "fe80::"
msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
self.assertIsNone(msg)
# Valid - IPv6 with final octets, no mask (a plain address is accepted)
cidr = "fe80::0"
msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
self.assertIsNone(msg)
# Invalid - Address format error
cidr = 'invalid'
msg = firewall._validate_ip_or_subnet_or_none(cidr, None)
self.assertEqual(msg, ("'%s' is not a valid IP address and "
"'%s' is not a valid IP subnet") % (cidr,
cidr))
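# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the neutron test suite): a standalone
# validator exhibiting the behaviour the port-range tests above exercise.
# The real firewall._validate_port_range implementation may differ in detail.
# ---------------------------------------------------------------------------
def _example_validate_port_range(data):
    """Returns None for a valid port or 'min:max' range, else an error string."""
    if data is None:
        return None
    for port in str(data).split(':'):
        try:
            val = int(port)
        except (ValueError, TypeError):
            return "Port '%s' is not a valid number" % port
        if val <= 0 or val > 65535:
            return "Invalid port '%s'" % port
    return None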
| apache-2.0 |
dongjoon-hyun/tensorflow | tensorflow/python/ops/lookup_ops.py | 2 | 49459 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Lookup operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import six
from tensorflow.python.compat import compat as fwd_compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_lookup_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.training.checkpointable import tracking as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
@tf_export("initialize_all_tables")
@deprecated(None, "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
no tables the returned Op is a NoOp.
"""
return tables_initializer(name)
@tf_export("initializers.tables_initializer", "tables_initializer")
def tables_initializer(name="init_all_tables"):
"""Returns an Op that initializes all tables of the default graph.
Args:
name: Optional name for the initialization op.
Returns:
An Op that initializes all tables. Note that if there are
no tables the returned Op is a NoOp.
"""
initializers = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
if initializers:
return control_flow_ops.group(*initializers, name=name)
return control_flow_ops.no_op(name=name)
def _check_table_dtypes(table, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
table: The table to check types against to.
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype.base_dtype != table.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(table.key_dtype, key_dtype))
if value_dtype.base_dtype != table.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(table.value_dtype, value_dtype))
class LookupInterface(checkpointable.TrackableResource):
"""Represent a lookup table that persists across different steps."""
def __init__(self, key_dtype, value_dtype):
"""Construct a lookup table interface.
Args:
key_dtype: The table key type.
value_dtype: The table value type.
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
super(LookupInterface, self).__init__()
def create_resource(self):
raise NotImplementedError
@property
def key_dtype(self):
"""The table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The table value dtype."""
return self._value_dtype
@property
def name(self):
"""The name of the table."""
return NotImplementedError
def size(self, name=None):
"""Compute the number of elements in this table."""
raise NotImplementedError
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values."""
raise NotImplementedError
class InitializableLookupTableBase(LookupInterface):
"""Initializable lookup table interface.
Initializable lookup tables persist across different steps.
"""
def __init__(self, default_value, initializer):
"""Construct a table object from a table reference.
It requires a table initializer object (subclass of `TableInitializerBase`).
It provides the table key and value types, as well as the op to initialize
the table. The caller is responsible for executing the initialization op.
Args:
default_value: The value to use if a key is missing in the table.
initializer: The table initializer to use.
"""
super(InitializableLookupTableBase, self).__init__(initializer.key_dtype,
initializer.value_dtype)
self._default_value = ops.convert_to_tensor(
default_value, dtype=self._value_dtype)
self._default_value.get_shape().merge_with(tensor_shape.scalar())
self._initializer = initializer
self._resource_handle = self.create_resource()
self._init_op = self.initialize()
def initialize(self):
return self._initializer.initialize(self)
@property
def initializer(self):
return self._init_op
@property
def default_value(self):
"""The default value of the table."""
return self._default_value
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self.name,
[self.resource_handle]) as scope:
return gen_lookup_ops.lookup_table_size_v2(
self.resource_handle, name=scope)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: A name for the operation (optional).
Returns:
A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` or `default_value` doesn't match the table data
types.
"""
key_tensor = keys
if isinstance(keys, sparse_tensor.SparseTensor):
key_tensor = keys.values
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(
name, "%s_Lookup" % self.name,
(self.resource_handle, key_tensor, self._default_value)) as scope:
values = gen_lookup_ops.lookup_table_find_v2(
self.resource_handle, key_tensor, self._default_value, name=scope)
values.set_shape(key_tensor.get_shape())
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
else:
return values
class HashTable(InitializableLookupTableBase):
"""A generic hash table implementation.
Example usage:
```python
table = tf.HashTable(
tf.KeyValueTensorInitializer(keys, values), -1)
out = table.lookup(input_tensor)
table.init.run()
print(out.eval())
```
"""
def __init__(self, initializer, default_value, shared_name=None, name=None):
"""Creates a non-initialized `HashTable` object.
Creates a table, the type of its keys and values are specified by the
initializer.
Before using the table you will have to initialize it. After initialization
the table will be immutable.
Args:
initializer: The table initializer to use. See `HashTable` kernel for
supported key and value types.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
Returns:
A `HashTable` object.
"""
self._initializer = initializer
self._default_value = default_value
self._shared_name = shared_name
self._name = name
self._table_name = ""
super(HashTable, self).__init__(default_value, initializer)
self._value_shape = self._default_value.get_shape()
def create_resource(self):
with ops.name_scope(self._name, "hash_table",
(self._initializer, self._default_value)) as scope:
table_ref = gen_lookup_ops.hash_table_v2(
shared_name=self._shared_name,
key_dtype=self._initializer.key_dtype,
value_dtype=self._initializer.value_dtype,
name=scope)
self._table_name = scope.split("/")[-2]
return table_ref
@property
def name(self):
return self._table_name
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_Export" % self.name,
[self.resource_handle]) as name:
with ops.colocate_with(self.resource_handle):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self.resource_handle, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
class TableInitializerBase(object):
"""Base class for lookup table initializers."""
def __init__(self, key_dtype, value_dtype):
"""Construct a table initializer object.
Args:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
@property
def key_dtype(self):
"""The expected table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The expected table value dtype."""
return self._value_dtype
def initialize(self, table):
"""Returns the table initialization op."""
raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
"""Table initializers given `keys` and `values` tensors."""
def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
"""Constructs a table initializer object based on keys and values tensors.
Args:
keys: The tensor for the keys.
values: The tensor for the values.
key_dtype: The `keys` data type. Used when `keys` is a python array.
value_dtype: The `values` data type. Used when `values` is a python array.
name: A name for the operation (optional).
"""
with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
self._values = ops.convert_to_tensor(
values, dtype=value_dtype, name="values")
self._name = scope
super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
self._values.dtype)
def initialize(self, table):
"""Initializes the given `table` with `keys` and `values` tensors.
Args:
table: The table to initialize.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
"""
_check_table_dtypes(table, self._keys.dtype, self._values.dtype)
with ops.name_scope(
self._name, values=(table.resource_handle, self._keys,
self._values)) as scope:
if context.executing_eagerly():
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
scope += str(ops.uid())
if fwd_compat.forward_compatible(2018, 9, 19):
init_op = gen_lookup_ops.lookup_table_import_v2(
table.resource_handle, self._keys, self._values, name=scope)
else:
# To maintain forward compatibility, use the old implementation.
init_op = gen_lookup_ops.initialize_table_v2(
table.resource_handle, self._keys, self._values, name=scope)
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
return init_op
class TextFileIndex(object):
WHOLE_LINE = -2
LINE_NUMBER = -1
class TextFileInitializer(TableInitializerBase):
"""Table initializers from a text file.
This initializer assigns one entry in the table for each line in the file.
The key and value type of the table to initialize is given by `key_dtype` and
`value_dtype`.
The key and value content to get from each line is specified by
the `key_index` and `value_index`.
* `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
expects data type int64.
* `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
type string.
* A value `>=0` means use the index (starting at zero) of the split line based
on `delimiter`.
For example if we have a file with the following content:
```
emerson 10
lake 20
palmer 30
```
The following snippet initializes a table with the first column as keys and
second column as values:
* `emerson -> 10`
* `lake -> 20`
* `palmer -> 30`
```python
table = tf.lookup.HashTable(tf.lookup.TextFileInitializer(
"test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
...
table.init.run()
```
Similarly to initialize the whole line as keys and the line number as values.
* `emerson 10 -> 0`
* `lake 20 -> 1`
* `palmer 30 -> 2`
```python
table = tf.lookup.HashTable(tf.lookup.TextFileInitializer(
"test.txt", tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
...
table.init.run()
```
"""
def __init__(self,
filename,
key_dtype,
key_index,
value_dtype,
value_index,
vocab_size=None,
delimiter="\t",
name=None):
"""Constructs a table initializer object to populate from a text file.
It generates one key-value pair per line. The type of table key and
value are specified by `key_dtype` and `value_dtype`, respectively.
Similarly the content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_dtype: The `key` data type.
key_index: the index that represents information of a line to get the
table 'key' values from.
value_dtype: The `value` data type.
value_index: the index that represents information of a line to get the
table 'value' values from.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: A name for the operation (optional).
Raises:
ValueError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
if not isinstance(filename, ops.Tensor) and not filename:
raise ValueError("Filename required for %s." % name)
key_dtype = dtypes.as_dtype(key_dtype)
value_dtype = dtypes.as_dtype(value_dtype)
if key_index < -2:
raise ValueError("Invalid key index %s." % (key_index))
if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
(dtypes.int64, key_dtype))
if ((key_index == TextFileIndex.WHOLE_LINE) and
(not key_dtype.is_integer) and (key_dtype != dtypes.string)):
raise ValueError(
"Signature mismatch. Keys must be integer or string, got %s." %
key_dtype)
if value_index < -2:
raise ValueError("Invalid value index %s." % (value_index))
if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.int64, value_dtype))
if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.string, value_dtype))
if (vocab_size is not None) and (vocab_size <= 0):
raise ValueError("Invalid vocab_size %s." % vocab_size)
self._filename = filename
self._key_index = key_index
self._value_index = value_index
self._vocab_size = vocab_size
self._delimiter = delimiter
self._name = name
super(TextFileInitializer, self).__init__(key_dtype, value_dtype)
def initialize(self, table):
"""Initializes the table from a text file.
Args:
table: The table to be initialized.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
"""
_check_table_dtypes(table, self.key_dtype, self.value_dtype)
with ops.name_scope(self._name, "text_file_init",
(table.resource_handle,)) as scope:
filename = ops.convert_to_tensor(
self._filename, dtypes.string, name="asset_filepath")
init_op = gen_lookup_ops.initialize_table_from_text_file_v2(
table.resource_handle,
filename,
self._key_index,
self._value_index,
-1 if self._vocab_size is None else self._vocab_size,
self._delimiter,
name=scope)
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
# If the filename tensor is anything other than a string constant (e.g., if
# it is a placeholder) then it does not make sense to track it as an asset.
if not context.executing_eagerly() and constant_op.is_constant(filename):
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
return init_op
class TextFileStringTableInitializer(TextFileInitializer):
"""Table initializer for `int64` IDs to string tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.LINE_NUMBER,
value_column_index=TextFileIndex.WHOLE_LINE,
vocab_size=None,
delimiter="\t",
name="text_file_string_table_init"):
"""Constructs an initializer for an id-to-string table from a text file.
It populates a table whose key and value types are int64 and string,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by `key_column_index`
and `value_column_index`.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the keys
from. The default is to use the line number, starting from zero.
value_column_index: The column index from the text file to get the
values from. The default is to use the whole line content.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileStringTableInitializer, self).__init__(
filename,
dtypes.int64,
key_column_index,
dtypes.string,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
class TextFileIdTableInitializer(TextFileInitializer):
"""Table initializer for string to `int64` IDs tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.WHOLE_LINE,
value_column_index=TextFileIndex.LINE_NUMBER,
vocab_size=None,
delimiter="\t",
name="text_file_id_table_init",
key_dtype=dtypes.string):
"""Constructs an initializer for an string-to-id table from a text file.
It populates a table that its key and value types are string and int64,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the `key`
values from. The default is to use the whole line content.
value_column_index: The column index from the text file to get the `value`
values from. The default is to use the line number, starting from zero.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
key_dtype: The `key` data type.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileIdTableInitializer, self).__init__(
filename,
key_dtype,
key_column_index,
dtypes.int64,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ["hasher", "key"])):
"""A structure for the spec of the hashing function to use for hash buckets.
`hasher` is the name of the hashing function to use (eg. "fasthash",
"stronghash").
`key` is optional and specifies the key to use for the hash function if
supported, currently only used by a strong hash.
Fields:
hasher: The hasher name to use.
key: The key to be used by the hashing function, if required.
"""
__slots__ = ()
FastHashSpec = HasherSpec("fasthash", None) # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
"""A structure to specify a key of the strong keyed hash spec.
The strong hash requires a `key`, which is a list of 2 unsigned integer
numbers. These should be non-zero; random numbers generated from random.org
would be a fine choice.
Fields:
key: The key to be used by the keyed hashing function.
"""
__slots__ = ()
def __new__(cls, key):
if len(key) != 2:
raise ValueError("key must have size 2, got %s." % len(key))
if not isinstance(key[0], compat.integral_types) or not isinstance(
key[1], compat.integral_types):
raise TypeError("Invalid key %s. Must be unsigned integer values." % key)
return super(cls, StrongHashSpec).__new__(cls, "stronghash", key)
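# Illustrative sketch (not part of TensorFlow): constructing the two hasher
# specs used for out-of-vocabulary bucket assignment. The key values below
# are arbitrary examples; any pair of non-zero unsigned integers is accepted.
def _example_hasher_specs():
  """Returns a (FastHashSpec, StrongHashSpec) pair, for illustration only."""
  fast_spec = FastHashSpec  # keyless fast hash
  strong_spec = StrongHashSpec(key=(937, 4395854))  # exactly two integers
  return fast_spec, strong_spec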
def _as_string(tensor):
if dtypes.string == tensor.dtype.base_dtype:
return tensor
return string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
"""String to Id table wrapper that assigns out-of-vocabulary keys to buckets.
For example, if an instance of `IdTableWithHashBuckets` is initialized with a
string-to-id table that maps:
* `emerson -> 0`
* `lake -> 1`
* `palmer -> 2`
The `IdTableWithHashBuckets` object performs the following mapping:
* `emerson -> 0`
* `lake -> 1`
* `palmer -> 2`
* `<other term> -> bucket_id`, where bucket_id will be between `3` and
`3 + num_oov_buckets - 1`, calculated by:
`hash(<term>) % num_oov_buckets + vocab_size`
If input_tensor is `["emerson", "lake", "palmer", "king", "crimson"]`,
the lookup result is `[0, 1, 2, 4, 7]`.
If `table` is None, only out-of-vocabulary buckets are used.
Example usage:
```python
num_oov_buckets = 3
input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimson"])
table = tf.IdTableWithHashBuckets(
tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
num_oov_buckets)
out = table.lookup(input_tensor).
table.init.run()
print(out.eval())
```
The hash function used for generating out-of-vocabulary bucket IDs is handled
by `hasher_spec`.
"""
def __init__(self,
table,
num_oov_buckets,
hasher_spec=FastHashSpec,
name=None,
key_dtype=None):
"""Construct a `IdTableWithHashBuckets` object.
Args:
table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignation of out-of-vocabulary buckets (optional).
name: A name for the operation (optional).
key_dtype: Data type of keys passed to `lookup`. Defaults to
`table.key_dtype` if `table` is specified, otherwise `tf.string`.
Must be string or integer, and must be castable to `table.key_dtype`.
Raises:
ValueError: when `table` is None and `num_oov_buckets` is not positive.
TypeError: when `hasher_spec` is invalid.
"""
# If a name ends with a '/' it is a "name scope", remove all trailing '/'
# characters to use as table name.
if name:
name = name.rstrip("/")
if table:
if key_dtype is None:
key_dtype = table.key_dtype
supported_table_key_dtypes = (dtypes.int64, dtypes.string)
if table.key_dtype not in supported_table_key_dtypes:
raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
(supported_table_key_dtypes, key_dtype))
if table.key_dtype.is_integer != key_dtype.is_integer:
raise TypeError("Invalid key dtype, expected %s but got %s." %
("integer" if key_dtype.is_integer else "non-integer",
table.key_dtype))
if table.value_dtype != dtypes.int64:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(dtypes.int64, table.value_dtype))
self._table = table
name = name or self._table.name
else:
if num_oov_buckets <= 0:
raise ValueError("oov_buckets must be > 0 if no table is supplied.")
key_dtype = dtypes.string if key_dtype is None else key_dtype
self._table = None
name = name or "hash_bucket"
if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
raise TypeError(
"Invalid key_dtype, expected integer or string, got %s." % key_dtype)
self._num_oov_buckets = num_oov_buckets
if not isinstance(hasher_spec, HasherSpec):
raise TypeError(
"hasher_spec must be of type HasherSpec, got %s" % hasher_spec)
self._hasher_spec = hasher_spec
self._table_name = name.split("/")[-1]
super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64)
def create_resource(self):
if self._table is not None:
return self._table.create_resource()
return None
def initialize(self):
if self._table is not None:
return self._table.initialize()
with ops.name_scope(None, "init"):
return control_flow_ops.no_op()
@property
def initializer(self):
if self._table is not None:
return self._table._init_op # pylint: disable=protected-access
with ops.name_scope(None, "init"):
return control_flow_ops.no_op()
@property
def resource_handle(self):
if self._table is not None:
return self._table.resource_handle
return None
@property
def name(self):
return self._table_name
def size(self, name=None):
"""Compute the number of elements in this table."""
with ops.name_scope(name, "%s_Size" % self.name) as scope:
if self._table:
tsize = self._table.size(scope)
else:
tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
return tsize + self._num_oov_buckets
def _get_string_to_hash_bucket_fn(self, hasher_spec):
"""Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
if not isinstance(hasher_spec, HasherSpec):
raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
if hasher_spec.hasher == "fasthash":
return string_ops.string_to_hash_bucket_fast
if hasher_spec.hasher == "legacy":
return string_ops.string_to_hash_bucket
if hasher_spec.hasher == "stronghash":
return functools.partial(
string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
raise ValueError("Unknown hasher %s" % hasher_spec.hasher)
def lookup(self, keys, name=None):
"""Looks up `keys` in the table, outputs the corresponding values.
It assigns out-of-vocabulary keys to buckets based on their hashes.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` doesn't match the table key data type.
"""
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
values = keys
if isinstance(keys, sparse_tensor.SparseTensor):
values = keys.values
if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
values = math_ops.to_int64(values)
if self._num_oov_buckets == 0:
ids = self._table.lookup(values, name=name)
else:
# TODO(yleon): Consider moving this functionality to its own kernel.
with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
self._hasher_spec)
buckets = str_to_hash_bucket(
_as_string(values),
num_buckets=self._num_oov_buckets,
name="hash_bucket")
if self._table:
ids = self._table.lookup(values)
buckets = math_ops.add(buckets, self._table.size())
is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
else:
ids = buckets
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
return ids
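# Illustrative sketch (not part of TensorFlow): the out-of-vocabulary bucket
# arithmetic described in the IdTableWithHashBuckets docstring, in plain
# Python. `hash_fn` stands in for the fasthash/stronghash kernels and is an
# assumption; the real ops use different hash functions.
def _example_oov_bucket_id(term, vocab_size, num_oov_buckets, hash_fn=hash):
  """Maps an OOV `term` into [vocab_size, vocab_size + num_oov_buckets - 1]."""
  return hash_fn(term) % num_oov_buckets + vocab_size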
def index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
key_dtype=dtypes.string,
name=None,
key_column_index=TextFileIndex.WHOLE_LINE,
value_column_index=TextFileIndex.LINE_NUMBER,
delimiter="\t"):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a vocabulary file specified in
`vocabulary_file`, where the whole line is the key and the zero-based line
number is the ID.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is
`[vocabulary size, vocabulary size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
To specify multi-column vocabulary files, use `key_column_index`,
`value_column_index` and `delimiter`.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Sample Usages:
If we have a vocabulary file "test.txt" with the following content:
```
emerson
lake
palmer
```
```python
features = tf.constant(["emerson", "lake", "and", "palmer"])
table = tf.lookup.index_table_from_file(
vocabulary_file="test.txt", num_oov_buckets=1)
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2] # where 3 is the out-of-vocabulary bucket
```
Args:
vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
num_oov_buckets: The number of out-of-vocabulary buckets.
vocab_size: Number of the elements in the vocabulary, if known.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignation of out-of-vocabulary buckets.
key_dtype: The `key` data type.
name: A name for this op (optional).
key_column_index: The column index from the text file to get the `key`
values from. The default is to use the whole line content.
value_column_index: The column index from the text file to get the `value`
values from. The default is to use the line number, starting from zero.
delimiter: The delimiter to separate fields in a line.
Returns:
The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.
Raises:
ValueError: If `vocabulary_file` is not set.
ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
than zero.
"""
if vocabulary_file is None or (
isinstance(vocabulary_file, six.string_types) and not vocabulary_file):
raise ValueError("vocabulary_file must be specified and must not be empty.")
if num_oov_buckets < 0:
raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
% num_oov_buckets)
if vocab_size is not None and vocab_size < 1:
vocab_file_value = vocabulary_file
if isinstance(vocabulary_file, ops.Tensor):
vocab_file_value = tensor_util.constant_value(vocabulary_file) or "?"
raise ValueError("vocab_size must be greater than 0, got %d. "
"vocabulary_file: %s" % (vocab_size, vocab_file_value))
if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
raise TypeError("Only integer and string keys are supported.")
with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
table = None
shared_name = ""
with ops.name_scope(None, "hash_table") as hash_table_scope:
if vocab_size:
# Keep the shared_name:
# <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
key_column_index,
value_column_index)
else:
# Keep the shared_name
# <table_type>_<filename>_<key_index>_<value_index>
shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
key_column_index,
value_column_index)
init = TextFileIdTableInitializer(
vocabulary_file,
vocab_size=vocab_size,
key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
name="table_init",
key_column_index=key_column_index,
value_column_index=value_column_index,
delimiter=delimiter)
table = HashTable(
init, default_value, shared_name=shared_name, name=hash_table_scope)
if num_oov_buckets:
table = IdTableWithHashBuckets(
table,
num_oov_buckets=num_oov_buckets,
hasher_spec=hasher_spec,
name=feat_to_id_scope,
key_dtype=key_dtype)
return table
def index_table_from_tensor(vocabulary_list,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a string `vocabulary_list` 1-D
tensor where each element is a key and corresponding index within the tensor
is the value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`. The bucket ID range is
`[vocabulary list size, vocabulary list size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
Elements in `vocabulary_list` cannot have duplicates, otherwise when executing
the table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
table = tf.lookup.index_table_from_tensor(
vocabulary_list=vocabulary_list, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 4, 2]
```
Args:
vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
indices. The type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
The lookup table to map an input `Tensor` to index `int64` `Tensor`.
Raises:
ValueError: If `vocabulary_list` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if vocabulary_list is None:
raise ValueError("vocabulary_list must be specified.")
if num_oov_buckets < 0:
raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
% num_oov_buckets)
if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
raise TypeError("Only integer and string keys are supported.")
with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
keys = ops.convert_to_tensor(vocabulary_list)
if keys.dtype.is_integer != dtype.is_integer:
raise ValueError("Expected %s, got %s." %
("integer"
if dtype.is_integer else "non-integer", keys.dtype))
if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
num_elements = array_ops.size(keys)
values = math_ops.to_int64(math_ops.range(num_elements))
shared_name = ""
with ops.name_scope(None, "hash_table") as hash_table_scope:
if context.executing_eagerly():
# Ensure a unique name when eager execution is enabled to avoid spurious
# sharing issues.
shared_name += str(ops.uid())
table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
init = KeyValueTensorInitializer(
table_keys,
values,
table_keys.dtype.base_dtype,
dtypes.int64,
name="table_init")
table = HashTable(
init, default_value, shared_name=shared_name, name=hash_table_scope)
if num_oov_buckets:
table = IdTableWithHashBuckets(
table,
num_oov_buckets=num_oov_buckets,
hasher_spec=hasher_spec,
name=feat_to_id_scope,
key_dtype=dtype)
return table
def index_to_string_table_from_file(vocabulary_file,
vocab_size=None,
default_value="UNK",
name=None,
key_column_index=TextFileIndex.LINE_NUMBER,
value_column_index=TextFileIndex.WHOLE_LINE,
delimiter="\t"):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The table is initialized from a vocabulary file specified in
`vocabulary_file`, where the whole line is the value and the
zero-based line number is the index.
Any input which does not have a corresponding index in the vocabulary file
(an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
To specify multi-column vocabulary files, use `key_column_index`,
`value_column_index` and `delimiter`.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Sample Usages:
If we have a vocabulary file "test.txt" with the following content:
```
emerson
lake
palmer
```
```python
indices = tf.constant([1, 5], tf.int64)
table = tf.lookup.index_to_string_table_from_file(
vocabulary_file="test.txt", default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
vocabulary_file: The vocabulary filename, may be a constant scalar `Tensor`.
vocab_size: Number of the elements in the vocabulary, if known.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
key_column_index: The column index from the text file to get the `key`
values from. The default is to use the line number, starting from zero.
value_column_index: The column index from the text file to get the `value`
values from. The default is to use the whole line content.
delimiter: The delimiter to separate fields in a line.
Returns:
The lookup table mapping `int64` index `Tensor`s to string values.
Raises:
ValueError: when `vocabulary_file` is empty.
ValueError: when `vocab_size` is invalid.
"""
if vocabulary_file is None or (
isinstance(vocabulary_file, six.string_types) and not vocabulary_file):
raise ValueError("vocabulary_file must be specified and must not be empty.")
if vocab_size is not None and vocab_size < 1:
raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
with ops.name_scope(name, "index_to_string") as scope:
shared_name = ""
if vocab_size:
# Keep a shared_name
# <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
key_column_index,
value_column_index)
else:
# Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
shared_name = "hash_table_%s_%s_%s" % (vocabulary_file, key_column_index,
value_column_index)
init = TextFileStringTableInitializer(
vocabulary_file,
vocab_size=vocab_size,
name="table_init",
key_column_index=key_column_index,
value_column_index=value_column_index,
delimiter=delimiter)
# TODO(yleon): Use a more efficient structure.
return HashTable(init, default_value, shared_name=shared_name, name=scope)
def index_to_string_table_from_tensor(vocabulary_list,
default_value="UNK",
name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `vocabulary_list` 1-D
`Tensor` where each element is a value and the corresponding index within the
tensor is the key.
Any input which does not have a corresponding index in 'vocabulary_list'
(an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
Elements in `vocabulary_list` cannot have duplicates, otherwise when executing
the table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.lookup.index_to_string_table_from_tensor(
vocabulary_list, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
from indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The lookup table mapping `int64` index `Tensors` to the associated string
values.
Raises:
ValueError: when `vocabulary_list` is not set.
"""
if vocabulary_list is None:
raise ValueError("vocabulary_list must be specified.")
with ops.name_scope(name, "index_to_string") as scope:
vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
num_elements = array_ops.size(vocabulary_list)
keys = math_ops.to_int64(math_ops.range(num_elements))
shared_name = ""
init = KeyValueTensorInitializer(
keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
# TODO(yleon): Use a more efficient structure.
return HashTable(init, default_value, shared_name=shared_name, name=scope)
ops.NotDifferentiable("LookupTableFind")
ops.NotDifferentiable("LookupTableFindV2")
ops.NotDifferentiable("LookupTableInsert")
ops.NotDifferentiable("LookupTableInsertV2")
ops.NotDifferentiable("LookupTableSize")
ops.NotDifferentiable("LookupTableSizeV2")
ops.NotDifferentiable("HashTable")
ops.NotDifferentiable("HashTableV2")
ops.NotDifferentiable("InitializeTable")
ops.NotDifferentiable("InitializeTableV2")
ops.NotDifferentiable("InitializeTableFromTextFile")
ops.NotDifferentiable("InitializeTableFromTextFileV2")
ops.NotDifferentiable("MutableDenseHashTable")
ops.NotDifferentiable("MutableDenseHashTableV2")
ops.NotDifferentiable("MutableHashTable")
ops.NotDifferentiable("MutableHashTableV2")
ops.NotDifferentiable("MutableHashTableOfTensors")
ops.NotDifferentiable("MutableHashTableOfTensorsV2")
| apache-2.0 |
shr-project/opkg | tests/regress/opk.py | 3 | 2877 | import tarfile, os
import cfg
class Opk:
valid_control_fields = ["Package", "Version", "Depends", "Provides",\
"Replaces", "Conflicts", "Suggests", "Recommends",\
"Section", "Architecture", "Maintainer", "MD5Sum",\
"Size", "InstalledSize", "Filename", "Source",\
"Description", "OE", "Homepage", "Priority",\
"Conffiles"]
def __init__(self, **control):
for k in control.keys():
if k not in self.valid_control_fields:
raise Exception("Invalid control field: "
"{}".format(k))
if "Package" not in control.keys():
print("Cannot create opk without Package name.\n")
return None
if "Architecture" not in control.keys():
control["Architecture"] = "all"
if "Version" not in control.keys():
control["Version"] = "1.0"
self.control = control
def write(self, tar_not_ar=False, data_files=None):
filename = "{Package}_{Version}_{Architecture}.opk"\
.format(**self.control)
if os.path.exists(filename):
os.unlink(filename)
if os.path.exists("control"):
os.unlink("control")
if os.path.exists("control.tar.gz"):
os.unlink("control.tar.gz")
if os.path.exists("data.tar.gz"):
os.unlink("data.tar.gz")
f = open("control", "w")
for k in self.control.keys():
f.write("{}: {}\n".format(k, self.control[k]))
f.close()
tar = tarfile.open("control.tar.gz", "w:gz")
tar.add("control")
tar.close()
tar = tarfile.open("data.tar.gz", "w:gz")
if data_files:
for df in data_files:
tar.add(df)
tar.close()
if tar_not_ar:
tar = tarfile.open(filename, "w:gz")
tar.add("control.tar.gz")
tar.add("data.tar.gz")
tar.close()
else:
os.system("ar q {} control.tar.gz data.tar.gz \
2>/dev/null".format(filename))
os.unlink("control")
os.unlink("control.tar.gz")
os.unlink("data.tar.gz")
class OpkGroup:
def __init__(self):
self.opk_list = []
def add(self, **control):
self.opk_list.append(Opk(**control))
def addOpk(self, opk):
self.opk_list.append(opk)
def write_opk(self, tar_not_ar=False):
for o in self.opk_list:
o.write(tar_not_ar)
def write_list(self, filename="Packages"):
f = open(filename, "w")
for opk in self.opk_list:
for k in opk.control.keys():
f.write("{}: {}\n".format(k, opk.control[k]))
f.write("Filename: {Package}_{Version}_{Architecture}"
".opk\n".format(**opk.control))
f.write("\n")
f.close()
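# A minimal usage sketch of the helpers above (the package name and field
# values are illustrative only; they are not part of the test suite):
#   o = Opk(Package="foo", Version="1.0", Architecture="all")
#   o.write()                     # produces foo_1.0_all.opk (ar archive by default)
#   group = OpkGroup()
#   group.addOpk(o)
#   group.write_list("Packages")  # writes a package index for the feed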
def regress_init():
"""
Initialisation and sanity checking.
"""
if not os.access(cfg.opkgcl, os.X_OK):
print("Cannot exec {}".format(cfg.opkgcl))
exit(False)
os.chdir(cfg.opkdir)
os.system("rm -fr {}".format(cfg.offline_root))
os.makedirs("{}/usr/lib/opkg".format(cfg.offline_root))
os.makedirs("{}/etc/opkg".format(cfg.offline_root))
f = open("{}/etc/opkg/opkg.conf".format(cfg.offline_root), "w")
f.write("arch all 1\n")
f.write("src test file:{}\n".format(cfg.opkdir))
f.close()
| gpl-2.0 |
rouge8/pex | pex/testing.py | 13 | 7806 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import contextlib
import os
import random
import subprocess
import sys
import tempfile
import zipfile
from collections import namedtuple
from textwrap import dedent
from .bin.pex import log, main
from .common import safe_mkdir, safe_rmtree
from .compatibility import nested
from .installer import EggInstaller, Packager
from .pex_builder import PEXBuilder
from .util import DistributionHelper, named_temporary_file
@contextlib.contextmanager
def temporary_dir():
td = tempfile.mkdtemp()
try:
yield td
finally:
safe_rmtree(td)
@contextlib.contextmanager
def temporary_filename():
"""Creates a temporary filename.
This is useful when you need to pass a filename to an API. Windows requires all
handles to a file be closed before deleting/renaming it, so this makes it a bit
simpler."""
with named_temporary_file() as fp:
fp.write(b'')
fp.close()
yield fp.name
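# A minimal usage sketch: the yielded file already exists but is closed, so
# other APIs may freely reopen, move, or delete it (the source path below is
# hypothetical):
#   with temporary_filename() as fn:
#       shutil.copy('/path/to/src', fn)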
def random_bytes(length):
return ''.join(
map(chr, (random.randint(ord('a'), ord('z')) for _ in range(length)))).encode('utf-8')
@contextlib.contextmanager
def temporary_content(content_map, interp=None, seed=31337):
"""Write content to disk where content is map from string => (int, string).
If target is int, write int random bytes. Otherwise write contents of string."""
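# A minimal usage sketch (file names, sizes and strings are illustrative):
#   with temporary_content({'a.bin': 64, 'b.txt': 'hello %(who)s'},
#                          interp={'who': 'world'}) as td:
#       ...  # td holds a.bin (64 random bytes) and b.txt ("hello world")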
random.seed(seed)
interp = interp or {}
with temporary_dir() as td:
for filename, size_or_content in content_map.items():
safe_mkdir(os.path.dirname(os.path.join(td, filename)))
with open(os.path.join(td, filename), 'wb') as fp:
if isinstance(size_or_content, int):
fp.write(random_bytes(size_or_content))
else:
fp.write((size_or_content % interp).encode('utf-8'))
yield td
def yield_files(directory):
for root, _, files in os.walk(directory):
for f in files:
filename = os.path.join(root, f)
rel_filename = os.path.relpath(filename, directory)
yield filename, rel_filename
def write_zipfile(directory, dest, reverse=False):
with contextlib.closing(zipfile.ZipFile(dest, 'w')) as zf:
for filename, rel_filename in sorted(yield_files(directory), reverse=reverse):
zf.write(filename, arcname=rel_filename)
return dest
PROJECT_CONTENT = {
'setup.py': dedent('''
from setuptools import setup
setup(
name=%(project_name)r,
version='0.0.0',
zip_safe=%(zip_safe)r,
packages=['my_package'],
scripts=[
'scripts/hello_world',
'scripts/shell_script',
],
package_data={'my_package': ['package_data/*.dat']},
install_requires=%(install_requires)r,
)
'''),
'scripts/hello_world': '#!/usr/bin/env python\nprint("hello world!")\n',
'scripts/shell_script': '#!/usr/bin/env bash\necho hello world\n',
'my_package/__init__.py': 0,
'my_package/my_module.py': 'def do_something():\n print("hello world!")\n',
'my_package/package_data/resource1.dat': 1000,
'my_package/package_data/resource2.dat': 1000,
}
@contextlib.contextmanager
def make_installer(name='my_project', installer_impl=EggInstaller, zip_safe=True,
install_reqs=None):
interp = {'project_name': name, 'zip_safe': zip_safe, 'install_requires': install_reqs or []}
with temporary_content(PROJECT_CONTENT, interp=interp) as td:
yield installer_impl(td)
@contextlib.contextmanager
def make_source_dir(name='my_project', install_reqs=None):
interp = {'project_name': name, 'zip_safe': True, 'install_requires': install_reqs or []}
with temporary_content(PROJECT_CONTENT, interp=interp) as td:
yield td
def make_sdist(name='my_project', zip_safe=True, install_reqs=None):
with make_installer(name=name, installer_impl=Packager, zip_safe=zip_safe,
install_reqs=install_reqs) as packager:
return packager.sdist()
@contextlib.contextmanager
def make_bdist(name='my_project', installer_impl=EggInstaller, zipped=False, zip_safe=True):
with make_installer(name=name, installer_impl=installer_impl, zip_safe=zip_safe) as installer:
dist_location = installer.bdist()
if zipped:
yield DistributionHelper.distribution_from_path(dist_location)
else:
with temporary_dir() as td:
extract_path = os.path.join(td, os.path.basename(dist_location))
with contextlib.closing(zipfile.ZipFile(dist_location)) as zf:
zf.extractall(extract_path)
yield DistributionHelper.distribution_from_path(extract_path)
COVERAGE_PREAMBLE = """
try:
from coverage import coverage
cov = coverage(auto_data=True, data_suffix=True)
cov.start()
except ImportError:
pass
"""
def write_simple_pex(td, exe_contents, dists=None, coverage=False):
"""Write a pex file that contains an executable entry point
:param td: temporary directory path
:param exe_contents: entry point python file
:type exe_contents: string
:param dists: distributions to include, typically sdists or bdists
:param coverage: include coverage header
"""
dists = dists or []
with open(os.path.join(td, 'exe.py'), 'w') as fp:
fp.write(exe_contents)
pb = PEXBuilder(path=td, preamble=COVERAGE_PREAMBLE if coverage else None)
for dist in dists:
pb.add_egg(dist.location)
pb.set_executable(os.path.join(td, 'exe.py'))
pb.freeze()
return pb
class IntegResults(namedtuple('results', 'output return_code exception')):
"""Convenience object to return integration run results."""
def assert_success(self):
assert self.exception is None and self.return_code is None
def assert_failure(self):
assert self.exception or self.return_code
def run_pex_command(args, env=None):
"""Simulate running pex command for integration testing.
This is different from run_simple_pex in that it calls the pex command rather
than running a generated pex. This is useful for testing end to end runs
with specific command line arguments or env options.
"""
def logger_callback(_output):
def mock_logger(msg, v=None):
_output.append(msg)
return mock_logger
exception = None
error_code = None
output = []
log.set_logger(logger_callback(output))
try:
main(args=args)
except SystemExit as e:
error_code = e.code
except Exception as e:
exception = e
return IntegResults(output, error_code, exception)
# TODO(wickman) Why not PEX.run?
def run_simple_pex(pex, args=(), env=None):
po = subprocess.Popen(
[sys.executable, pex] + list(args),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
po.wait()
return po.stdout.read().replace(b'\r', b''), po.returncode
def run_simple_pex_test(body, args=(), env=None, dists=None, coverage=False):
with nested(temporary_dir(), temporary_dir()) as (td1, td2):
pb = write_simple_pex(td1, body, dists=dists, coverage=coverage)
pex = os.path.join(td2, 'app.pex')
pb.build(pex)
return run_simple_pex(pex, args=args, env=env)
def _iter_filter(data_dict):
fragment = '/%s/_pex/' % PEXBuilder.BOOTSTRAP_DIR
for filename, records in data_dict.items():
try:
bi = filename.index(fragment)
except ValueError:
continue
# rewrite to look like root source
yield ('pex/' + filename[bi + len(fragment):], records)
def combine_pex_coverage(coverage_file_iter):
from coverage.data import CoverageData
combined = CoverageData(basename='.coverage_combined')
for filename in coverage_file_iter:
cov = CoverageData(basename=filename)
cov.read()
combined.add_line_data(dict(_iter_filter(cov.line_data())))
combined.add_arc_data(dict(_iter_filter(cov.arc_data())))
combined.write()
return combined.filename
| apache-2.0 |
slisson/intellij-community | python/helpers/docutils/readers/doctree.py | 246 | 1607 | # $Id: doctree.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martin Blais <[email protected]>
# Copyright: This module has been placed in the public domain.
"""Reader for existing document trees."""
from docutils import readers, utils, transforms
class Reader(readers.ReReader):
"""
Adapt the Reader API for an existing document tree.
The existing document tree must be passed as the ``source`` parameter to
the `docutils.core.Publisher` initializer, wrapped in a
`docutils.io.DocTreeInput` object::
pub = docutils.core.Publisher(
..., source=docutils.io.DocTreeInput(document), ...)
The original document settings are overridden; if you want to use the
settings of the original document, pass ``settings=document.settings`` to
the Publisher call above.
"""
supported = ('doctree',)
config_section = 'doctree reader'
config_section_dependencies = ('readers',)
def parse(self):
"""
No parsing to do; refurbish the document tree instead.
Overrides the inherited method.
"""
self.document = self.input
# Create fresh Transformer object, to be populated from Writer
# component.
self.document.transformer = transforms.Transformer(self.document)
# Replace existing settings object with new one.
self.document.settings = self.settings
# Create fresh Reporter object because it is dependent on
# (new) settings.
self.document.reporter = utils.new_reporter(
self.document.get('source', ''), self.document.settings)
| apache-2.0 |
dmitriiabramov/react | scripts/bench/measure.py | 20 | 4681 | #!/usr/bin/env python
# Copyright 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import functools
import json
import os
import subprocess
import sys
def _run_js_in_jsc(jit, js, env):
return subprocess.check_call(
['jsc', '-e', """
function now() {
return preciseTime() * 1000;
}
function globalEval(code) {
(0, eval)(code);
}
function report(label, time) {
print(label + '_' + %(engine)s, time);
}
this.ENV = %(env)s;
%(js)s
""" % {
'env': json.dumps(env),
'js': js,
'engine': json.dumps('jsc_' + ('jit' if jit else 'nojit')),
}],
env=dict(os.environ, JSC_useJIT='yes' if jit else 'no'),
)
_run_js_in_jsc_jit = functools.partial(_run_js_in_jsc, True)
_run_js_in_jsc_nojit = functools.partial(_run_js_in_jsc, False)
def _run_js_in_node(js, env):
return subprocess.check_call(
['node', '-e', """
function now() {
var hrTime = process.hrtime();
return hrTime[0] * 1e3 + hrTime[1] * 1e-6;
}
function globalEval(code) {
var vm = require('vm');
// Hide "module" so UMD wrappers use the global
vm.runInThisContext('(function(module){' + code + '\\n})()');
}
function readFile(filename) {
var fs = require('fs');
return fs.readFileSync(filename);
}
function report(label, time) {
console.log(label + '_node', time);
}
global.ENV = %(env)s;
%(js)s
""" % {
'env': json.dumps(env),
'js': js
}]
)
def _measure_ssr_ms(engine, react_path, bench_name, bench_path, measure_warm):
engine(
"""
var reactCode = readFile(ENV.react_path);
var START = now();
globalEval(reactCode);
var END = now();
if (typeof React !== 'object') throw new Error('React not loaded');
report('factory_ms', END - START);
globalEval(readFile(ENV.bench_path));
if (typeof Benchmark !== 'function') {
throw new Error('benchmark not loaded');
}
var START = now();
var html = React.renderToString(React.createElement(Benchmark));
html.charCodeAt(0); // flatten ropes
var END = now();
report('ssr_' + ENV.bench_name + '_cold_ms', END - START);
var warmup = ENV.measure_warm ? 80 : 0;
var trials = ENV.measure_warm ? 40 : 0;
for (var i = 0; i < warmup; i++) {
React.renderToString(React.createElement(Benchmark));
}
for (var i = 0; i < trials; i++) {
var START = now();
var html = React.renderToString(React.createElement(Benchmark));
html.charCodeAt(0); // flatten ropes
var END = now();
report('ssr_' + ENV.bench_name + '_warm_ms', END - START);
}
""",
{
'bench_name': bench_name,
'bench_path': bench_path,
'measure_warm': measure_warm,
'react_path': react_path,
},
)
def _main():
if len(sys.argv) != 2:
sys.stderr.write("usage: measure.py react.min.js >out.txt\n")
return 1
react_path = sys.argv[1]
trials = 30
sys.stderr.write("Measuring SSR for PE benchmark (%d trials)\n" % trials)
for i in range(trials):
for engine in [
_run_js_in_jsc_jit,
_run_js_in_jsc_nojit,
_run_js_in_node
]:
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', False)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
trials = 3
sys.stderr.write("Measuring SSR for PE with warm JIT (%d slow trials)\n" % trials)
for i in range(trials):
for engine in [
_run_js_in_jsc_jit,
_run_js_in_jsc_nojit,
_run_js_in_node
]:
_measure_ssr_ms(engine, react_path, 'pe', 'bench-pe-es5.js', True)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
if __name__ == '__main__':
sys.exit(_main())
| bsd-3-clause |
F5Networks/f5-common-python | f5/bigip/tm/auth/remote_user.py | 1 | 1086 | # coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IP® auth module
REST URI
``http://localhost/mgmt/tm/auth/remote-user``
GUI Path
``System --> Users --> Authentication``
REST Kind
``tm:auth:remote-user:*``
"""
from f5.bigip.resource import UnnamedResource
class Remote_User(UnnamedResource):
"""BIG-IP® auth source resource"""
def __init__(self, auth):
super(Remote_User, self).__init__(auth)
self._meta_data['required_json_kind'] = \
'tm:auth:remote-user:remote-userstate'
| apache-2.0 |
caphrim007/ansible | lib/ansible/galaxy/login.py | 104 | 4619 | ########################################################################
#
# (C) 2015, Chris Houseknecht <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import json
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.six.moves import input
from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlparse
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.urls import open_url
from ansible.utils.color import stringc
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyLogin(object):
''' Class to handle authenticating user with Galaxy API prior to performing CUD operations '''
GITHUB_AUTH = 'https://api.github.com/authorizations'
def __init__(self, galaxy, github_token=None):
self.galaxy = galaxy
self.github_username = None
self.github_password = None
if github_token is None:
self.get_credentials()
def get_credentials(self):
display.display(u'\n\n' + "We need your " + stringc("Github login", 'bright cyan') +
" to identify you.", screen_only=True)
display.display("This information will " + stringc("not be sent to Galaxy", 'bright cyan') +
", only to " + stringc("api.github.com.", "yellow"), screen_only=True)
display.display("The password will not be displayed." + u'\n\n', screen_only=True)
display.display("Use " + stringc("--github-token", 'yellow') +
" if you do not want to enter your password." + u'\n\n', screen_only=True)
try:
self.github_username = input("Github Username: ")
except:
pass
try:
self.github_password = getpass.getpass("Password for %s: " % self.github_username)
except:
pass
if not self.github_username or not self.github_password:
raise AnsibleError("Invalid Github credentials. Username and password are required.")
def remove_github_token(self):
'''
If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
retrieve the token after creation, so we are forced to create a new one.
'''
try:
tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True,))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
for token in tokens:
if token['note'] == 'ansible-galaxy login':
display.vvvvv('removing token: %s' % token['token_last_eight'])
try:
open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username,
url_password=self.github_password, method='DELETE', force_basic_auth=True)
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
def create_github_token(self):
'''
Create a personal authorization token with a note of 'ansible-galaxy login'
'''
self.remove_github_token()
args = json.dumps({"scopes": ["public_repo"], "note": "ansible-galaxy login"})
try:
data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
url_password=self.github_password, force_basic_auth=True, data=args))
except HTTPError as e:
res = json.load(e)
raise AnsibleError(res['message'])
return data['token']
| gpl-3.0 |
FR4NK-W/osourced-scion | python/test/lib/sibra/ext/sof_test.py | 3 | 3422 | # Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`sof_test` --- lib.sibra.ext.sof unit tests
================================================
"""
# Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.sibra.ext.sof import SibraOpaqueField
from test.testcommon import create_mock
class TestSibraOpaqueFieldParse(object):
"""
Unit tests for lib.sibra.ext.sof.SibraOpaqueField._parse
"""
@patch("lib.sibra.ext.sof.Raw", autospec=True)
def test(self, raw):
inst = SibraOpaqueField()
data = create_mock(["pop"])
data.pop.side_effect = bytes(range(4)), "mac"
raw.return_value = data
# Call
inst._parse("data")
# Tests
raw.assert_called_once_with("data", inst.NAME, inst.LEN)
ntools.eq_(inst.ingress, 0x0001)
ntools.eq_(inst.egress, 0x0203)
ntools.eq_(inst.mac, "mac")
class TestSibraOpaqueFieldPack(object):
"""
Unit tests for lib.sibra.ext.sof.SibraOpaqueField.pack
"""
def test(self):
inst = SibraOpaqueField()
inst.ingress = 0x0001
inst.egress = 0x0203
inst.mac = b"mac"
# Call
ntools.eq_(inst.pack(), bytes(range(4)) + b"mac")
class TestSibraOpaqueFieldCalcMac(object):
"""
Unit tests for lib.sibra.ext.sof.SibraOpaqueField.calc_mac
"""
@patch("lib.sibra.ext.sof.mac", autospec=True)
def test_steady_no_prev(self, mac):
inst = SibraOpaqueField()
inst.ingress = 0x1111
inst.egress = 0xFFFF
mac.return_value = "cmac123"
info = create_mock(["LEN", "pack"])
info.LEN = 8
info.pack.return_value = b"packinfo"
# Call
ntools.eq_(inst.calc_mac(info, "key", [b"path id0"]), "cmac")
# Tests
mac.assert_called_once_with("key", b"".join([
bytes.fromhex("1111 FFFF"), b"packinfo", b"path id0",
bytes(inst.MAX_PATH_IDS_LEN - 8), bytes(8),
bytes(inst.MAC_BLOCK_PADDING),
]))
@patch("lib.sibra.ext.sof.mac", autospec=True)
def test_ephemeral_prev(self, mac):
inst = SibraOpaqueField()
inst.ingress = 0x1111
inst.egress = 0xFFFF
mac.return_value = "cmac123"
info = create_mock(["LEN", "pack"])
info.LEN = 8
info.pack.return_value = b"packinfo"
path_ids = b"steadyid", b"ephemeralpath id"
prev_raw = b"deadbeef"
# Call
ntools.eq_(inst.calc_mac(info, "key", path_ids, prev_raw), "cmac")
# Tests
mac.assert_called_once_with("key", b"".join([
bytes.fromhex("1111 FFFF"), b"packinfo",
b"steadyid", b"ephemeralpath id", bytes(inst.MAX_PATH_IDS_LEN - 24),
prev_raw, bytes(inst.MAC_BLOCK_PADDING),
]))
if __name__ == "__main__":
nose.run(defaultTest=__name__)
| apache-2.0 |
rs2/bokeh | bokeh/models/tests/test_formatters.py | 6 | 2350 | from textwrap import dedent
import pytest
from bokeh.models import FuncTickFormatter, Slider
flexx = pytest.importorskip("flexx")
def test_functickformatter_from_py_func_no_args():
def convert_to_minutes():
return tick * 60 # noqa
formatter = FuncTickFormatter.from_py_func(convert_to_minutes)
js_code = flexx.pyscript.py2js(convert_to_minutes, 'formatter')
function_wrapper = formatter.code.replace(js_code, '')
assert function_wrapper == "return formatter();\n"
def test_functickformatter_from_py_func_with_args():
slider = Slider()
def convert_to_minutes(x=slider):
return tick * 60 # noqa
formatter = FuncTickFormatter.from_py_func(convert_to_minutes)
js_code = flexx.pyscript.py2js(convert_to_minutes, 'formatter')
function_wrapper = formatter.code.replace(js_code, '')
assert function_wrapper == "return formatter(x);\n"
assert formatter.args['x'] is slider
def test_functickformatter_bad_pyfunc_formats():
def has_positional_arg(x):
return None
with pytest.raises(ValueError):
FuncTickFormatter.from_py_func(has_positional_arg)
def has_positional_arg_with_kwargs(y, x=5):
return None
with pytest.raises(ValueError):
FuncTickFormatter.from_py_func(has_positional_arg_with_kwargs)
def has_non_Model_keyword_argument(x=10):
return None
with pytest.raises(ValueError):
FuncTickFormatter.from_py_func(has_non_Model_keyword_argument)
def test_functickformatter_from_coffeescript_no_arg():
coffee_code = dedent("""
square = (x) -> x * x
return square(tick)
""")
formatter = FuncTickFormatter.from_coffeescript(code=coffee_code)
assert formatter.code == dedent("""\
var square;
square = function (x) {
return x * x;
};
return square(tick);
""")
assert formatter.args == {}
def test_functickformatter_from_coffeescript_with_args():
coffee_code = dedent("""
return slider.get("value") // 2 + tick
""")
slider = Slider()
formatter = FuncTickFormatter.from_coffeescript(code=coffee_code, args={"slider": slider})
assert formatter.code == dedent("""\
return Math.floor(slider.get("value") / 2) + tick;
""")
assert formatter.args == {"slider": slider}
| bsd-3-clause |
daniaki/Enrich2 | enrich2/tests/test_selection_valueerrors.py | 1 | 1835 | # Copyright 2016-2017 Alan F Rubin
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import unittest
from ..selection.selection import Selection
from .utilities import load_config_data, update_cfg_file
CFG_PATH = "data/config/selection/"
READS_DIR = "data/reads/selection/"
RESULT_DIR = "data/result/selection/"
class TestSelectionRaisesValueErrorOnlyWTCounts(unittest.TestCase):
def setUp(self):
cfg = load_config_data("selection_valueerror_only_wt.json", CFG_PATH)
cfg = update_cfg_file(cfg, 'counts', 'wt')
obj = Selection()
obj.force_recalculate = False
obj.component_outliers = False
obj.tsv_requested = False
obj.output_dir_override = False
# perform the analysis
obj.configure(cfg)
obj.validate()
obj.store_open(children=True)
self.obj = obj
def tearDown(self):
self.obj.store_close(children=True)
os.remove(self.obj.store_path)
shutil.rmtree(self.obj.output_dir)
def test_value_error_only_wt_counts_in_timepoints(self):
with self.assertRaises(ValueError):
self.obj.calculate()
if __name__ == "__main__":
unittest.main() | gpl-3.0 |
rapidhere/rpbtman_autosign | pytz/zoneinfo/America/Indiana/Vevay.py | 9 | 3701 | '''tzinfo timezone information for America/Indiana/Vevay.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Vevay(DstTzInfo):
'''America/Indiana/Vevay timezone definition. See datetime.tzinfo for details'''
zone = 'America/Indiana/Vevay'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,8,0,0),
d(1918,10,27,7,0,0),
d(1919,3,30,8,0,0),
d(1919,10,26,7,0,0),
d(1942,2,9,8,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,7,0,0),
d(1954,4,25,8,0,0),
d(1969,4,27,7,0,0),
d(1969,10,26,6,0,0),
d(1970,4,26,7,0,0),
d(1970,10,25,6,0,0),
d(1971,4,25,7,0,0),
d(1971,10,31,6,0,0),
d(1972,4,30,7,0,0),
d(1972,10,29,6,0,0),
d(2006,4,2,7,0,0),
d(2006,10,29,6,0,0),
d(2007,3,11,7,0,0),
d(2007,11,4,6,0,0),
d(2008,3,9,7,0,0),
d(2008,11,2,6,0,0),
d(2009,3,8,7,0,0),
d(2009,11,1,6,0,0),
d(2010,3,14,7,0,0),
d(2010,11,7,6,0,0),
d(2011,3,13,7,0,0),
d(2011,11,6,6,0,0),
d(2012,3,11,7,0,0),
d(2012,11,4,6,0,0),
d(2013,3,10,7,0,0),
d(2013,11,3,6,0,0),
d(2014,3,9,7,0,0),
d(2014,11,2,6,0,0),
d(2015,3,8,7,0,0),
d(2015,11,1,6,0,0),
d(2016,3,13,7,0,0),
d(2016,11,6,6,0,0),
d(2017,3,12,7,0,0),
d(2017,11,5,6,0,0),
d(2018,3,11,7,0,0),
d(2018,11,4,6,0,0),
d(2019,3,10,7,0,0),
d(2019,11,3,6,0,0),
d(2020,3,8,7,0,0),
d(2020,11,1,6,0,0),
d(2021,3,14,7,0,0),
d(2021,11,7,6,0,0),
d(2022,3,13,7,0,0),
d(2022,11,6,6,0,0),
d(2023,3,12,7,0,0),
d(2023,11,5,6,0,0),
d(2024,3,10,7,0,0),
d(2024,11,3,6,0,0),
d(2025,3,9,7,0,0),
d(2025,11,2,6,0,0),
d(2026,3,8,7,0,0),
d(2026,11,1,6,0,0),
d(2027,3,14,7,0,0),
d(2027,11,7,6,0,0),
d(2028,3,12,7,0,0),
d(2028,11,5,6,0,0),
d(2029,3,11,7,0,0),
d(2029,11,4,6,0,0),
d(2030,3,10,7,0,0),
d(2030,11,3,6,0,0),
d(2031,3,9,7,0,0),
d(2031,11,2,6,0,0),
d(2032,3,14,7,0,0),
d(2032,11,7,6,0,0),
d(2033,3,13,7,0,0),
d(2033,11,6,6,0,0),
d(2034,3,12,7,0,0),
d(2034,11,5,6,0,0),
d(2035,3,11,7,0,0),
d(2035,11,4,6,0,0),
d(2036,3,9,7,0,0),
d(2036,11,2,6,0,0),
d(2037,3,8,7,0,0),
d(2037,11,1,6,0,0),
]
_transition_info = [
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CDT'),
i(-21600,0,'CST'),
i(-18000,3600,'CWT'),
i(-18000,3600,'CPT'),
i(-21600,0,'CST'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
i(-14400,3600,'EDT'),
i(-18000,0,'EST'),
]
Vevay = Vevay()
| gpl-3.0 |
yephper/django | tests/defer_regress/models.py | 1 | 2798 | """
Regression tests for defer() / only() behavior.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=15)
text = models.TextField(default="xyzzy")
value = models.IntegerField()
other_value = models.IntegerField(default=0)
def __str__(self):
return self.name
class RelatedItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
class ProxyRelated(RelatedItem):
class Meta:
proxy = True
class Child(models.Model):
name = models.CharField(max_length=10)
value = models.IntegerField()
@python_2_unicode_compatible
class Leaf(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.CASCADE)
second_child = models.ForeignKey(Child, models.SET_NULL, related_name="other", null=True)
value = models.IntegerField(default=42)
def __str__(self):
return self.name
class ResolveThis(models.Model):
num = models.FloatField()
name = models.CharField(max_length=16)
class Proxy(Item):
class Meta:
proxy = True
@python_2_unicode_compatible
class SimpleItem(models.Model):
name = models.CharField(max_length=15)
value = models.IntegerField()
def __str__(self):
return self.name
class Feature(models.Model):
item = models.ForeignKey(SimpleItem, models.CASCADE)
class SpecialFeature(models.Model):
feature = models.ForeignKey(Feature, models.CASCADE)
class OneToOneItem(models.Model):
item = models.OneToOneField(Item, models.CASCADE, related_name="one_to_one_item")
name = models.CharField(max_length=15)
class ItemAndSimpleItem(models.Model):
item = models.ForeignKey(Item, models.CASCADE)
simple = models.ForeignKey(SimpleItem, models.CASCADE)
class Profile(models.Model):
profile1 = models.CharField(max_length=1000, default='profile1')
class Location(models.Model):
location1 = models.CharField(max_length=1000, default='location1')
class Request(models.Model):
profile = models.ForeignKey(Profile, models.SET_NULL, null=True, blank=True)
location = models.ForeignKey(Location, models.CASCADE)
items = models.ManyToManyField(Item)
request1 = models.CharField(default='request1', max_length=1000)
request2 = models.CharField(default='request2', max_length=1000)
request3 = models.CharField(default='request3', max_length=1000)
request4 = models.CharField(default='request4', max_length=1000)
class Base(models.Model):
text = models.TextField()
class Derived(Base):
other_text = models.TextField()
| bsd-3-clause |
arborh/tensorflow | tensorflow/python/framework/test_util_test.py | 4 | 30090 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def test_assert_ops_in_graph(self):
with self.test_session():
constant_op.constant(["hello", "taffy"], name="hello")
test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"bye": "Const"}, ops.get_default_graph())
self.assertRaises(ValueError, test_util.assert_ops_in_graph,
{"hello": "Variable"}, ops.get_default_graph())
@test_util.run_deprecated_v1
def test_session_functions(self):
with self.test_session() as sess:
sess_ref = weakref.ref(sess)
with self.cached_session(graph=None, config=None) as sess2:
# We make sure that sess2 is sess.
assert sess2 is sess
# We make sure we raise an exception if we use cached_session with
# different values.
with self.assertRaises(ValueError):
with self.cached_session(graph=ops.Graph()) as sess2:
pass
with self.assertRaises(ValueError):
with self.cached_session(force_gpu=True) as sess2:
pass
# We make sure that test_session will cache the session even after the
# with scope.
assert not sess_ref()._closed
with self.session() as unique_sess:
unique_sess_ref = weakref.ref(unique_sess)
with self.session() as sess2:
assert sess2 is not unique_sess
# We make sure the session is closed when we leave the with statement.
assert unique_sess_ref()._closed
def test_assert_equal_graph_def(self):
with ops.Graph().as_default() as g:
def_empty = g.as_graph_def()
constant_op.constant(5, name="five")
constant_op.constant(7, name="seven")
def_57 = g.as_graph_def()
with ops.Graph().as_default() as g:
constant_op.constant(7, name="seven")
constant_op.constant(5, name="five")
def_75 = g.as_graph_def()
# Comparing strings is order dependent
self.assertNotEqual(str(def_57), str(def_75))
# assert_equal_graph_def doesn't care about order
test_util.assert_equal_graph_def(def_57, def_75)
# Compare two unequal graphs
with self.assertRaisesRegexp(AssertionError,
r"^Found unexpected node '{{node seven}}"):
test_util.assert_equal_graph_def(def_57, def_empty)
def testIsGoogleCudaEnabled(self):
# The test doesn't assert anything. It ensures the py wrapper
# function is generated correctly.
if test_util.IsGoogleCudaEnabled():
print("GoogleCuda is enabled")
else:
print("GoogleCuda is disabled")
def testIsMklEnabled(self):
# This test doesn't assert anything.
# It ensures the py wrapper function is generated correctly.
if test_util.IsMklEnabled():
print("MKL is enabled")
else:
print("MKL is disabled")
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsStr(self):
graph_str = "node { name: 'w1' op: 'params' }"
graph_def = graph_pb2.GraphDef()
text_format.Merge(graph_str, graph_def)
# test string based comparison
self.assertProtoEquals(graph_str, graph_def)
# test original comparison
self.assertProtoEquals(graph_def, graph_def)
@test_util.run_in_graph_and_eager_modes
def testAssertProtoEqualsAny(self):
# Test assertProtoEquals with a protobuf.Any field.
meta_graph_def_str = """
meta_info_def {
meta_graph_version: "outer"
any_info {
[type.googleapis.com/tensorflow.MetaGraphDef] {
meta_info_def {
meta_graph_version: "inner"
}
}
}
}
"""
meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
# Check if the assertion failure message contains the content of
# the inner proto.
with self.assertRaisesRegexp(AssertionError,
r'meta_graph_version: "inner"'):
self.assertProtoEquals("", meta_graph_def_outer)
@test_util.run_in_graph_and_eager_modes
def testNDArrayNear(self):
a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadSucceeds(self):
def noop(ev):
ev.set()
event_arg = threading.Event()
self.assertFalse(event_arg.is_set())
t = self.checkedThread(target=noop, args=(event_arg,))
t.start()
t.join()
self.assertTrue(event_arg.is_set())
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadFails(self):
def err_func():
return 1 // 0
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("integer division or modulo by zero" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testCheckedThreadWithWrongAssertionFails(self):
x = 37
def err_func():
self.assertTrue(x < 10)
t = self.checkedThread(target=err_func)
t.start()
with self.assertRaises(self.failureException) as fe:
t.join()
self.assertTrue("False is not true" in str(fe.exception))
@test_util.run_in_graph_and_eager_modes
def testMultipleThreadsWithOneFailure(self):
def err_func(i):
self.assertTrue(i != 7)
threads = [
self.checkedThread(
target=err_func, args=(i,)) for i in range(10)
]
for t in threads:
t.start()
for i, t in enumerate(threads):
if i == 7:
with self.assertRaises(self.failureException):
t.join()
else:
t.join()
def _WeMustGoDeeper(self, msg):
with self.assertRaisesOpError(msg):
with ops.Graph().as_default():
node_def = ops._NodeDef("IntOutput", "name")
node_def_orig = ops._NodeDef("IntOutput", "orig")
op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
op = ops.Operation(node_def, ops.get_default_graph(),
original_op=op_orig)
raise errors.UnauthenticatedError(node_def, op, "true_err")
@test_util.run_in_graph_and_eager_modes
def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
with self.assertRaises(AssertionError):
self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
self._WeMustGoDeeper("true_err")
self._WeMustGoDeeper("name")
self._WeMustGoDeeper("orig")
@test_util.run_in_graph_and_eager_modes
def testAllCloseTensors(self):
a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = constant_op.constant(a_raw_data)
b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
self.assertAllClose(a, b)
self.assertAllClose(a, a_raw_data)
a_dict = {"key": a}
b_dict = {"key": b}
self.assertAllClose(a_dict, b_dict)
x_list = [a, b]
y_list = [a_raw_data, b]
self.assertAllClose(x_list, y_list)
@test_util.run_in_graph_and_eager_modes
def testAllCloseScalars(self):
self.assertAllClose(7, 7 + 1e-8)
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(7, 7 + 1e-5)
@test_util.run_in_graph_and_eager_modes
def testAllCloseList(self):
with self.assertRaisesRegexp(AssertionError, r"not close dif"):
self.assertAllClose([0], [1])
@test_util.run_in_graph_and_eager_modes
def testAllCloseDictToNonDict(self):
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose(1, {"a": 1})
with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
self.assertAllClose({"a": 1}, 1)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNamedtuples(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
# Identity.
self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
self.assertAllClose(
my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
@test_util.run_in_graph_and_eager_modes
def testAllCloseDicts(self):
a = 7
b = (2., 3.)
c = np.ones((3, 2, 4)) * 7.
expected = {"a": a, "b": b, "c": c}
# Identity.
self.assertAllClose(expected, expected)
self.assertAllClose(expected, dict(expected))
# With each item removed.
for k in expected:
actual = dict(expected)
del actual[k]
with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
self.assertAllClose(expected, actual)
# With each item changed.
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
c_copy = np.array(c)
c_copy[1, 1, 1] += 1e-5
with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
@test_util.run_in_graph_and_eager_modes
def testAllCloseListOfNamedtuples(self):
my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
l1 = [
my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
]
l2 = [
([[2.3, 2.5]], [[0.97, 0.96]]),
([[3.3, 3.5]], [[0.98, 0.99]]),
]
self.assertAllClose(l1, l2)
@test_util.run_in_graph_and_eager_modes
def testAllCloseNestedStructure(self):
a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
self.assertAllClose(a, a)
b = copy.deepcopy(a)
self.assertAllClose(a, b)
# Test mismatched values
b["y"][1][0]["nested"]["n"] = 4.2
with self.assertRaisesRegexp(AssertionError,
r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
self.assertAllClose(a, b)
@test_util.run_in_graph_and_eager_modes
def testArrayNear(self):
a = [1, 2]
b = [1, 2, 5]
with self.assertRaises(AssertionError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [[1, 2], [3, 4]]
with self.assertRaises(TypeError):
self.assertArrayNear(a, b, 0.001)
a = [1, 2]
b = [1, 2]
self.assertArrayNear(a, b, 0.001)
@test_util.skip_if(True) # b/117665998
def testForceGPU(self):
with self.assertRaises(errors.InvalidArgumentError):
with self.test_session(force_gpu=True):
# this relies on us not having a GPU implementation for assert, which
# seems sensible
x = constant_op.constant(True)
y = [15]
control_flow_ops.Assert(x, y).run()
@test_util.run_in_graph_and_eager_modes
def testAssertAllCloseAccordingToType(self):
# test plain int
self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
# test float64
self.assertAllCloseAccordingToType(
np.asarray([1e-8], dtype=np.float64),
np.asarray([2e-8], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-8], dtype=dtypes.float64),
constant_op.constant([2e-8], dtype=dtypes.float64),
rtol=1e-8,
atol=1e-8)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float64),
np.asarray([2e-7], dtype=np.float64),
rtol=1e-8, atol=1e-8
)
# test float32
self.assertAllCloseAccordingToType(
np.asarray([1e-7], dtype=np.float32),
np.asarray([2e-7], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-7], dtype=dtypes.float32),
constant_op.constant([2e-7], dtype=dtypes.float32),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-6], dtype=np.float32),
np.asarray([2e-6], dtype=np.float32),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7
)
# test float16
self.assertAllCloseAccordingToType(
np.asarray([1e-4], dtype=np.float16),
np.asarray([2e-4], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
self.assertAllCloseAccordingToType(
constant_op.constant([1e-4], dtype=dtypes.float16),
constant_op.constant([2e-4], dtype=dtypes.float16),
rtol=1e-8,
atol=1e-8,
float_rtol=1e-7,
float_atol=1e-7,
half_rtol=1e-4,
half_atol=1e-4)
with (self.assertRaises(AssertionError)):
self.assertAllCloseAccordingToType(
np.asarray([1e-3], dtype=np.float16),
np.asarray([2e-3], dtype=np.float16),
rtol=1e-8, atol=1e-8,
float_rtol=1e-7, float_atol=1e-7,
half_rtol=1e-4, half_atol=1e-4
)
@test_util.run_in_graph_and_eager_modes
def testAssertAllEqual(self):
i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
k = math_ops.add(i, j, name="k")
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual([100] * 3, i)
self.assertAllEqual([120] * 3, k)
self.assertAllEqual([20] * 3, j)
with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
self.assertAllEqual([0] * 3, k)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllClose(self):
# Test with arrays
self.assertNotAllClose([0.1], [0.2])
with self.assertRaises(AssertionError):
self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
self.assertNotAllClose([0.9, 1.0], x)
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLess(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllClose([110.0, 120.0, 130.0], z)
self.assertAllGreater(x, 95.0)
self.assertAllLess(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreater(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLess(x, 95.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllGreaterLessEqual(self):
x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
z = math_ops.add(x, y)
self.assertAllEqual([110.0, 120.0, 130.0], z)
self.assertAllGreaterEqual(x, 95.0)
self.assertAllLessEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 105.0)
with self.assertRaises(AssertionError):
self.assertAllGreaterEqual(x, 125.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 115.0)
with self.assertRaises(AssertionError):
self.assertAllLessEqual(x, 95.0)
@test_util.run_deprecated_v1
def testAssertAllInRangeWithNonNumericValuesFails(self):
s1 = constant_op.constant("Hello, ", name="s1")
c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
b = constant_op.constant([False, True], name="b")
with self.assertRaises(AssertionError):
self.assertAllInRange(s1, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(c, 0.0, 1.0)
with self.assertRaises(AssertionError):
self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeErrorMessageEllipses(self):
x_init = np.array([[10.0, 15.0]] * 12)
x = constant_op.constant(x_init, name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 5, 10)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeDetectsNaNs(self):
x = constant_op.constant(
[[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 0.0, 2.0)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRangeWithInfinities(self):
x = constant_op.constant([10.0, np.inf], name="x")
self.assertAllInRange(x, 10, np.inf)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
@test_util.run_deprecated_v1
def testRandomSeed(self):
# Call setUp again for WithCApi case (since it makes a new default graph
# after setup).
# TODO(skyewm): remove this when C API is permanently enabled.
self.setUp()
a = random.randint(1, 1000)
a_np_rand = np.random.rand(1)
with self.test_session():
a_rand = random_ops.random_normal([1]).eval()
# ensure that randomness in multiple testCases is deterministic.
self.setUp()
b = random.randint(1, 1000)
b_np_rand = np.random.rand(1)
with self.test_session():
b_rand = random_ops.random_normal([1]).eval()
self.assertEqual(a, b)
self.assertEqual(a_np_rand, b_np_rand)
self.assertEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
def test_run_in_graph_and_eager_modes(self):
l = []
def inc(self, with_brackets):
del self # self argument is required by run_in_graph_and_eager_modes.
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
l.append((with_brackets, mode))
f = test_util.run_in_graph_and_eager_modes(inc)
f(self, with_brackets=False)
f = test_util.run_in_graph_and_eager_modes()(inc)
f(self, with_brackets=True)
self.assertEqual(len(l), 4)
self.assertEqual(set(l), {
("with_brackets", "graph"),
("with_brackets", "eager"),
("without_brackets", "graph"),
("without_brackets", "eager"),
})
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
def test_run_in_eager_and_graph_modes_test_class(self):
msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
with self.assertRaisesRegexp(ValueError, msg):
@test_util.run_in_graph_and_eager_modes()
class Foo(object):
pass
del Foo # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
@test_util.run_deprecated_v1
def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
modes = []
mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
class ExampleTest(test_util.TensorFlowTestCase):
def runTest(self):
pass
def setUp(self):
modes.append("setup_" + mode_name())
@test_util.run_in_graph_and_eager_modes
def testBody(self):
modes.append("run_" + mode_name())
e = ExampleTest()
e.setUp()
e.testBody()
self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
@parameterized.named_parameters(dict(testcase_name="argument",
arg=True))
@test_util.run_in_graph_and_eager_modes
def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
self.assertEqual(arg, True)
def test_build_as_function_and_v1_graph(self):
    class GraphModeAndFunctionTest(parameterized.TestCase):
def __init__(inner_self): # pylint: disable=no-self-argument
        super(GraphModeAndFunctionTest, inner_self).__init__()
inner_self.graph_mode_tested = False
inner_self.inside_function_tested = False
def runTest(self):
del self
@test_util.build_as_function_and_v1_graph
def test_modes(inner_self): # pylint: disable=no-self-argument
is_building_function = ops.get_default_graph().building_function
if is_building_function:
self.assertFalse(inner_self.inside_function_tested)
inner_self.inside_function_tested = True
else:
self.assertFalse(inner_self.graph_mode_tested)
inner_self.graph_mode_tested = True
    test_object = GraphModeAndFunctionTest()
test_object.test_modes_v1_graph()
test_object.test_modes_function()
self.assertTrue(test_object.graph_mode_tested)
self.assertTrue(test_object.inside_function_tested)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
# A standalone test case to reproduce variable sharing issues which only pop up
# when setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
def setUp(self):
pass # Intentionally does not call TensorFlowTestCase's super()
@test_util.run_in_graph_and_eager_modes
def test_no_variable_sharing(self):
variable_scope.get_variable(
name="step_size",
initializer=np.array(1e-5, np.float32),
use_resource=True,
trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
def test_no_reference_cycle_decorator(self):
class ReferenceCycleTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_garbage_created
def test_has_cycle(self):
a = []
a.append(a)
@test_util.assert_no_garbage_created
def test_has_no_cycle(self):
pass
with self.assertRaises(AssertionError):
ReferenceCycleTest().test_has_cycle()
ReferenceCycleTest().test_has_no_cycle()
@test_util.run_in_graph_and_eager_modes
def test_no_leaked_tensor_decorator(self):
class LeakedTensorTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
@test_util.assert_no_new_tensors
def test_has_leak(self):
self.a = constant_op.constant([3.], name="leak")
@test_util.assert_no_new_tensors
def test_has_no_leak(self):
constant_op.constant([3.], name="no-leak")
with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
LeakedTensorTest().test_has_leak()
LeakedTensorTest().test_has_no_leak()
def test_no_new_objects_decorator(self):
class LeakedObjectTest(object):
def __init__(inner_self): # pylint: disable=no-self-argument
inner_self.assertEqual = self.assertEqual # pylint: disable=invalid-name
inner_self.accumulation = []
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_leak(self):
self.accumulation.append([1.])
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_has_no_leak(self):
self.not_accumulating = [1.]
with self.assertRaises(AssertionError):
LeakedObjectTest().test_has_leak()
LeakedObjectTest().test_has_no_leak()
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
clayz/crazy-quiz-web | lib/oauthlib/oauth2/rfc6749/endpoints/token.py | 71 | 3673 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import Request
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class TokenEndpoint(BaseEndpoint):
"""Token issuing endpoint.
The token endpoint is used by the client to obtain an access token by
presenting its authorization grant or refresh token. The token
endpoint is used with every authorization grant except for the
implicit grant type (since an access token is issued directly).
The means through which the client obtains the location of the token
endpoint are beyond the scope of this specification, but the location
is typically provided in the service documentation.
The endpoint URI MAY include an "application/x-www-form-urlencoded"
formatted (per `Appendix B`_) query component,
which MUST be retained when adding additional query parameters. The
endpoint URI MUST NOT include a fragment component::
https://example.com/path?query=component # OK
https://example.com/path?query=component#fragment # Not OK
    Since requests to the token endpoint result in the transmission of
clear-text credentials (in the HTTP request and response), the
authorization server MUST require the use of TLS as described in
Section 1.6 when sending requests to the token endpoint::
        # We will deny any request whose URI scheme is not https
The client MUST use the HTTP "POST" method when making access token
requests::
# HTTP method is currently not enforced
Parameters sent without a value MUST be treated as if they were
omitted from the request. The authorization server MUST ignore
unrecognized request parameters. Request and response parameters
MUST NOT be included more than once::
# Delegated to each grant type.
.. _`Appendix B`: http://tools.ietf.org/html/rfc6749#appendix-B
"""
def __init__(self, default_grant_type, default_token_type, grant_types):
BaseEndpoint.__init__(self)
self._grant_types = grant_types
self._default_token_type = default_token_type
self._default_grant_type = default_grant_type
@property
def grant_types(self):
return self._grant_types
@property
def default_grant_type(self):
return self._default_grant_type
@property
def default_grant_type_handler(self):
return self.grant_types.get(self.default_grant_type)
@property
def default_token_type(self):
return self._default_token_type
@catch_errors_and_unavailability
def create_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Extract grant_type and route to the designated handler."""
request = Request(
uri, http_method=http_method, body=body, headers=headers)
request.scopes = None
request.extra_credentials = credentials
grant_type_handler = self.grant_types.get(request.grant_type,
self.default_grant_type_handler)
log.debug('Dispatching grant_type %s request to %r.',
request.grant_type, grant_type_handler)
return grant_type_handler.create_token_response(
request, self.default_token_type)
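# Hedged usage sketch (not part of this module): a concrete provider would
# wire the endpoint up from a request validator, grant handlers and a token
# type, roughly like
#
#     validator = MyRequestValidator()   # assumption: application-specific
#     auth_grant = AuthorizationCodeGrant(validator)
#     bearer = BearerToken(validator)
#     endpoint = TokenEndpoint(default_grant_type='authorization_code',
#                              default_token_type=bearer,
#                              grant_types={'authorization_code': auth_grant})
#     endpoint.create_token_response(uri, http_method='POST',
#                                    body=request_body, headers=request_headers)
#
# AuthorizationCodeGrant, BearerToken and MyRequestValidator are named only
# for illustration; see the pre-configured servers shipped with oauthlib for
# the real wiring.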
| apache-2.0 |
TechBK/horizon-dev | openstack_dashboard/dashboards/admin/aggregates/urls.py | 46 | 1252 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.aggregates \
import views
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.aggregates.views',
url(r'^$',
views.IndexView.as_view(), name='index'),
url(r'^create/$',
views.CreateView.as_view(), name='create'),
url(r'^(?P<id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<id>[^/]+)/update_metadata/$',
views.UpdateMetadataView.as_view(), name='update_metadata'),
url(r'^(?P<id>[^/]+)/manage_hosts/$',
views.ManageHostsView.as_view(), name='manage_hosts'),
)
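# The named routes above are addressed through the panel namespace, e.g.
# reverse('horizon:admin:aggregates:update', args=[aggregate_id]); the exact
# URL prefix depends on how the panel is registered, so the resulting path is
# an assumption rather than a guarantee.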
| apache-2.0 |
ykim362/mxnet | tests/python/train/test_bucketing.py | 36 | 4868 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import numpy as np
import mxnet as mx
import random
from random import randint
def test_bucket_module():
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
logging.getLogger('').addHandler(console)
class DummySentenceIter(mx.rnn.BucketSentenceIter):
"""Dummy sentence iterator to output sentences the same as input.
"""
def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
data_name='data', label_name='l2_label', dtype='float32',
layout='NTC'):
super(DummySentenceIter, self).__init__(sentences, batch_size,
buckets=buckets, invalid_label=invalid_label,
data_name=data_name, label_name=label_name,
dtype=dtype, layout=layout)
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck)
self.nddata = []
self.ndlabel = []
for buck in self.data:
self.nddata.append(mx.nd.array(buck, dtype=self.dtype))
self.ndlabel.append(mx.nd.array(buck, dtype=self.dtype))
batch_size = 128
num_epochs = 5
num_hidden = 25
num_embed = 25
num_layers = 2
len_vocab = 50
buckets = [10, 20, 30, 40]
invalid_label = 0
num_sentence = 1000
train_sent = []
val_sent = []
for _ in range(num_sentence):
len_sentence = randint(1, max(buckets) + 10)
train_sentence = []
val_sentence = []
for _ in range(len_sentence):
train_sentence.append(randint(1, len_vocab))
val_sentence.append(randint(1, len_vocab))
train_sent.append(train_sentence)
val_sent.append(val_sentence)
data_train = DummySentenceIter(train_sent, batch_size, buckets=buckets,
invalid_label=invalid_label)
data_val = DummySentenceIter(val_sent, batch_size, buckets=buckets,
invalid_label=invalid_label)
stack = mx.rnn.SequentialRNNCell()
for i in range(num_layers):
stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i))
def sym_gen(seq_len):
data = mx.sym.Variable('data')
label = mx.sym.Variable('l2_label')
embed = mx.sym.Embedding(data=data, input_dim=len_vocab,
output_dim=num_embed, name='embed')
stack.reset()
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
pred = mx.sym.FullyConnected(data=pred, num_hidden=1, name='pred')
pred = mx.sym.reshape(pred, shape=(batch_size, -1))
loss = mx.sym.LinearRegressionOutput(pred, label, name='l2_loss')
return loss, ('data',), ('l2_label',)
contexts = mx.cpu(0)
model = mx.mod.BucketingModule(
sym_gen=sym_gen,
default_bucket_key=data_train.default_bucket_key,
context=contexts)
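    # Note: BucketingModule calls sym_gen(bucket_key) lazily for every bucket
    # length it encounters in the data, so each padded sentence length gets its
    # own unrolled graph while the underlying parameters are shared.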
logging.info('Begin fit...')
model.fit(
train_data=data_train,
eval_data=data_val,
eval_metric=mx.metric.MSE(),
kvstore='device',
optimizer='sgd',
optimizer_params={'learning_rate': 0.01,
'momentum': 0,
'wd': 0.00001},
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
num_epoch=num_epochs,
batch_end_callback=mx.callback.Speedometer(batch_size, 50))
logging.info('Finished fit...')
assert model.score(data_val, mx.metric.MSE())[0][1] < 350, "High mean square error."
if __name__ == "__main__":
test_bucket_module()
| apache-2.0 |
Comunitea/CMNT_00040_2016_ELN_addons | mrp_stock_forecast/__openerp__.py | 2 | 1649 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "MRP-Stock forecast",
"version" : "1.0",
"author" : "Pexego",
"website" : "http://www.pexego.es",
"category" : "Manufacturing",
"description": """MRP Stock forecast""",
"depends" : ['mrp','base', 'stock', 'product_format'],
"init_xml" : [],
"demo_xml" : [],
"data" : ['security/ir.model.access.csv',
'security/security.xml',
'mrp_forecast_view.xml',
'stock_forecast_view.xml',
'wizard/merge_forecasts_view.xml',
],
"installable": True,
'active': False
}
| agpl-3.0 |
conwin/node-gyp | gyp/test/builddir/gyptest-default.py | 74 | 2670 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""
import TestGyp
# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.
# Android does not support setting the build directory.
test = TestGyp.TestGyp(formats=['!make', '!ninja', '!android'])
test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
if test.format == 'msvs':
if test.uses_msbuild:
test.must_contain('src/prog1.vcxproj',
'<OutDir>..\\builddir\\Default\\</OutDir>')
else:
test.must_contain('src/prog1.vcproj',
'OutputDirectory="..\\builddir\\Default\\"')
test.relocate('src', 'relocate/src')
test.subdir('relocate/builddir')
# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""
expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""
expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""
expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""
expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""
def run_builddir(prog, expect):
dir = 'relocate/builddir/Default/'
test.run(program=test.workpath(dir + prog), stdout=expect)
run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test()
| mit |
freinque/walksafr | mainapp/models.py | 1 | 5525 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'
# into your database.
from __future__ import unicode_literals
from django.db import models
from django.forms.extras.widgets import SelectDateWidget
import datetime
#from location_field.models.plain import PlainLocationField
#from django.contrib.gis.db import models
#from django.contrib.gis.geos import Point
#from location_field.models.spatial import LocationField
#class Place(models.Model):
# city = models.CharField(max_length=255)
# location = LocationField(based_fields=[city], zoom=7, default='Point(1.0 1.0)')
# objects = models.GeoManager()
class Ends(models.Model):
orig_city = models.CharField(max_length=255)
dest_city = models.CharField(max_length=255)
    # Use callables (not call results) as defaults so they are evaluated per
    # record rather than once at import time.
    ends_datetime = models.DateTimeField(default=datetime.datetime.now, blank=True, null=True)
    ends_date = models.DateField(default=datetime.date.today, blank=True, null=True)
    ends_time = models.TimeField(default=lambda: datetime.datetime.now().time(), blank=True, null=True)
orig_lati = models.CharField(max_length=255)#PlainLocationField(based_fields=[city], zoom=7)
orig_long = models.CharField(max_length=255)#PlainLocationField(based_fields=[city], zoom=7)
dest_lati = models.CharField(max_length=255)#PlainLocationField(based_fields=[city], zoom=7)
dest_long = models.CharField(max_length=255)#PlainLocationField(based_fields=[city], zoom=7)
class Crimes(models.Model):
id = models.IntegerField( primary_key=True)
#index = models.IntegerField( blank=True, null=True )
incidntnum = models.BigIntegerField(db_column='IncidntNum', blank=True, null=True) # Field name made lowercase.
category = models.TextField(db_column='Category', blank=True, null=True) # Field name made lowercase.
descript = models.TextField(db_column='Descript', blank=True, null=True) # Field name made lowercase.
dayofweek = models.TextField(db_column='DayOfWeek', blank=True, null=True) # Field name made lowercase.
date = models.TextField(db_column='Date', blank=True, null=True) # Field name made lowercase.
time = models.TextField(db_column='Time', blank=True, null=True) # Field name made lowercase.
pddistrict = models.TextField(db_column='PdDistrict', blank=True, null=True) # Field name made lowercase.
resolution = models.TextField(db_column='Resolution', blank=True, null=True) # Field name made lowercase.
address = models.TextField(db_column='Address', blank=True, null=True) # Field name made lowercase.
x = models.FloatField(db_column='X', blank=True, null=True) # Field name made lowercase.
y = models.FloatField(db_column='Y', blank=True, null=True) # Field name made lowercase.
location = models.TextField(db_column='Location', blank=True, null=True) # Field name made lowercase.
pdid = models.BigIntegerField(db_column='PdId', blank=True, null=True) # Field name made lowercase.
datetime = models.DateTimeField(db_column='DateTime', blank=True, null=True) # Field name made lowercase.
city = models.CharField(max_length=30, blank=True, null=True)
ns_time = models.TimeField(db_column='ns_time', blank=True, null=True)
ns_date = models.DateField(db_column='ns_date', blank=True, null=True)
ns_dayofweek = models.IntegerField(db_column='ns_dayofweek', blank=True, null=True)
class Meta:
managed = False
db_table = 'crimes'
class PopDensity(models.Model):
id = models.IntegerField( primary_key=True)
index = models.BigIntegerField(blank=True, null=True)
zip = models.BigIntegerField(blank=True, null=True)
y = models.FloatField(blank=True, null=True)
x = models.FloatField(blank=True, null=True)
zip_zcta = models.BigIntegerField(db_column='Zip/ZCTA', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
number_2010_population = models.BigIntegerField(db_column='2010 Population', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters. Field renamed because it wasn't a valid Python identifier.
land_sq_mi = models.FloatField(db_column='Land-Sq-Mi', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
density = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'pop_density'
class Tweets(models.Model):
tweet_id = models.BigIntegerField(blank=True, null=True)
datetime = models.TextField(blank=True, null=True)
x = models.FloatField(blank=True, null=True)
y = models.FloatField(blank=True, null=True)
tweet = models.TextField(blank=True, null=True)
ns_datetime = models.DateTimeField(blank=True, null=True)
ns_date = models.DateField(blank=True, null=True)
ns_time = models.TimeField(blank=True, null=True)
ns_dayofweek = models.IntegerField(blank=True, null=True)
city = models.CharField(max_length=30, blank=True, null=True)
id = models.IntegerField( primary_key=True )
class Meta:
managed = False
db_table = 'tweets'
| gpl-2.0 |
saurabh3949/mxnet | tests/nightly/compilation_warnings/process_output.py | 9 | 2164 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
import operator
def process_output(command_output):
warnings = {}
regex = r"(.*):\swarning:\s(.*)"
lines = command_output.split("\n")
for line in lines[:-2]:
matches = re.finditer(regex, line)
for matchNum, match in enumerate(matches):
try:
warnings[match.group()] +=1
except KeyError:
warnings[match.group()] =1
time = lines[-2]
return time, warnings
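# Illustrative example (made-up path) of a compiler line the regex above is
# meant to match:
#   src/operator/foo.cc:42:10: warning: unused variable 'x' [-Wunused-variable]
# Identical matched lines collapse into a single dictionary key whose value is
# the number of occurrences.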
def generate_stats(warnings):
total_count = sum(warnings.values())
sorted_warnings = sorted(warnings.items(), key=operator.itemgetter(1), reverse=True)
return sorted_warnings, total_count
def print_summary(time, warnings):
sorted_warnings, total_count = generate_stats(warnings)
print "START - Compilation warnings count"
print total_count, 'warnings'
print "END - Compilation warnings count"
print 'START - Compilation warnings summary'
print 'Time taken to compile:', time, 's'
print 'Total number of warnings:', total_count, '\n'
print 'Below is the list of unique warnings and the number of occurrences of that warning'
for warning, count in sorted_warnings:
print count, ': ', warning
print 'END - Compilation warnings summary'
c_output = open(sys.argv[1],'r')
time, warnings = process_output(c_output.read())
print_summary(time, warnings)
| apache-2.0 |
azunite/wireshark_1023 | tools/dftestlib/time_relative.py | 40 | 1244 | # Copyright (c) 2013 by Gilbert Ramirez <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dftestlib import dftest
class testTimeRelative(dftest.DFTest):
trace_file = "nfs.pcap"
def test_relative_time_1(self):
dfilter = "frame.time_delta == 0.7"
self.assertDFilterCount(dfilter, 1)
def test_relative_time_2(self):
dfilter = "frame.time_delta > 0.7"
self.assertDFilterCount(dfilter, 0)
def test_relative_time_3(self):
dfilter = "frame.time_delta < 0.7"
self.assertDFilterCount(dfilter, 1)
| gpl-2.0 |
niboshi/chainer | tests/chainer_tests/distributions_tests/test_beta.py | 8 | 1105 | import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestBeta(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Beta
self.scipy_dist = stats.beta
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob', 'mean',
'sample', 'support', 'variance'])
a = numpy.random.uniform(0, 10, self.shape).astype(numpy.float32)
b = numpy.random.uniform(0, 10, self.shape).astype(numpy.float32)
self.params = {'a': a, 'b': b}
self.scipy_params = {'a': a, 'b': b}
self.support = '[0, 1]'
def sample_for_test(self):
smp = numpy.random.uniform(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| mit |
kidaa30/spacewalk | scripts/ncsu-rhntools/oldSystems.py | 11 | 4028 | #!/usr/bin/python
# oldSystems.py - Find and possibly remove inactive systems from RHN
# Copyright (C) 2007 NC State University
# Written by Jack Neely <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sys
import xmlrpclib
import time
import optparse
from datetime import date
from datetime import timedelta
from rhnapi import RHNClient
# Stolen from Yum
# Copyright 2005 Duke University
def userconfirm():
"""gets a yes or no from the user, defaults to No"""
while True:
choice = raw_input('Is this ok [y/N]: ')
choice = choice.lower()
if len(choice) == 0 or choice[0] in ['y', 'n']:
break
if len(choice) == 0 or choice[0] != 'y':
return False
else:
return True
# end stealage
def parseDate(s):
tuple = time.strptime(s, "%Y-%m-%d")
return date.fromtimestamp(time.mktime(tuple))
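# For example, parseDate("2007-03-15") returns datetime.date(2007, 3, 15).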
def cliOptions():
usage = "%prog <URL> [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-d", "--days", action="store", default=30,
type="int", dest="days", help="Your RHN server.")
parser.add_option("--delete", action="store_true", default=False,
dest="delete",
help="Delete these registrations from RHN.")
parser.add_option("--noconfirm", action="store_true", default=False,
dest="noconfirm",
help="Don't ask for delete confirmation.")
if len(sys.argv) == 1:
parser.print_help()
opts, args = parser.parse_args(sys.argv)
if len(args) != 2:
print "You must provide the URL to your RHN server."
parser.print_help()
sys.exit(1)
# first arg is name of the program
opts.server = args[1]
return opts
def search(rhn, days):
s = rhn.server
delta = timedelta(days=days)
today = date.today()
oldsystems = []
systems = s.system.list_user_systems(rhn.session)
for system in systems:
#sys.stderr.write("Working on: %s ID: %s\n" % \
# (system["name"], system["id"]))
d = parseDate(system["last_checkin"])
if today - delta > d:
# This machine hasn't checked in
oldsystems.append(system)
return oldsystems
def delete(rhn, list, noconfirm=False):
for server in list:
print "Removing %s..." % server["name"]
if noconfirm or userconfirm():
ret = rhn.server.system.deleteSystems(rhn.session,
int(server["id"]))
if ret != 1:
print "Removing %s failed with error code: %s" % \
(server["name"], ret)
else:
print "Skipping %s" % server["name"]
def main():
print "Search and Destroy old RHN registrations."
print
o = cliOptions()
rhn = RHNClient(o.server)
rhn.connect()
print "RHN API Version: %s" % rhn.server.api.system_version()
print "Today's date = %s" % date.today().isoformat()
print
list = search(rhn, o.days)
for s in list:
print s["name"]
print "There are %s inactive systems." % len(list)
if o.delete:
print "Going to delete these registrations. Hit ^C to abort now!"
time.sleep(5)
delete(rhn, list, o.noconfirm)
if __name__ == "__main__":
main()
| gpl-2.0 |
daspecster/google-cloud-python | spanner/google/cloud/spanner/streamed.py | 1 | 8653 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for streaming results."""
from google.protobuf.struct_pb2 import ListValue
from google.protobuf.struct_pb2 import Value
from google.cloud.proto.spanner.v1 import type_pb2
import six
# pylint: disable=ungrouped-imports
from google.cloud.spanner._helpers import _parse_value_pb
# pylint: enable=ungrouped-imports
class StreamedResultSet(object):
"""Process a sequence of partial result sets into a single set of row data.
:type response_iterator:
:param response_iterator:
Iterator yielding
:class:`google.cloud.proto.spanner.v1.result_set_pb2.PartialResultSet`
instances.
"""
def __init__(self, response_iterator):
self._response_iterator = response_iterator
self._rows = [] # Fully-processed rows
self._counter = 0 # Counter for processed responses
self._metadata = None # Until set from first PRS
self._stats = None # Until set from last PRS
self._resume_token = None # To resume from last received PRS
self._current_row = [] # Accumulated values for incomplete row
self._pending_chunk = None # Incomplete value
@property
def rows(self):
"""Fully-processed rows.
:rtype: list of row-data lists.
        :returns: list of completed row data, from processed PRS responses.
"""
return self._rows
@property
def fields(self):
"""Field descriptors for result set columns.
:rtype: list of :class:`~google.cloud.proto.spanner.v1.type_pb2.Field`
:returns: list of fields describing column names / types.
"""
return self._metadata.row_type.fields
@property
def metadata(self):
"""Result set metadata
:rtype: :class:`~.result_set_pb2.ResultSetMetadata`
:returns: structure describing the results
"""
return self._metadata
@property
def stats(self):
"""Result set statistics
:rtype:
:class:`~google.cloud.proto.spanner.v1.result_set_pb2.ResultSetStats`
        :returns: structure describing statistics about the response
"""
return self._stats
@property
def resume_token(self):
"""Token for resuming interrupted read / query.
:rtype: bytes
:returns: token from last chunk of results.
"""
return self._resume_token
def _merge_chunk(self, value):
"""Merge pending chunk with next value.
:type value: :class:`~google.protobuf.struct_pb2.Value`
:param value: continuation of chunked value from previous
partial result set.
:rtype: :class:`~google.protobuf.struct_pb2.Value`
:returns: the merged value
"""
current_column = len(self._current_row)
field = self.fields[current_column]
merged = _merge_by_type(self._pending_chunk, value, field.type)
self._pending_chunk = None
return merged
def _merge_values(self, values):
"""Merge values into rows.
:type values: list of :class:`~google.protobuf.struct_pb2.Value`
:param values: non-chunked values from partial result set.
"""
width = len(self.fields)
for value in values:
index = len(self._current_row)
field = self.fields[index]
self._current_row.append(_parse_value_pb(value, field.type))
if len(self._current_row) == width:
self._rows.append(self._current_row)
self._current_row = []
def consume_next(self):
"""Consume the next partial result set from the stream.
Parse the result set into new/existing rows in :attr:`_rows`
"""
response = six.next(self._response_iterator)
self._counter += 1
self._resume_token = response.resume_token
if self._metadata is None: # first response
self._metadata = response.metadata
if response.HasField('stats'): # last response
self._stats = response.stats
values = list(response.values)
if self._pending_chunk is not None:
values[0] = self._merge_chunk(values[0])
if response.chunked_value:
self._pending_chunk = values.pop()
self._merge_values(values)
def consume_all(self):
"""Consume the streamed responses until there are no more."""
while True:
try:
self.consume_next()
except StopIteration:
break
def __iter__(self):
iter_rows, self._rows[:] = self._rows[:], ()
while True:
if len(iter_rows) == 0:
self.consume_next() # raises StopIteration
iter_rows, self._rows[:] = self._rows[:], ()
while iter_rows:
yield iter_rows.pop(0)
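# Illustrative usage (names assumed): callers typically just iterate,
#
#     results = StreamedResultSet(response_iterator)
#     for row in results:
#         handle(row)
#
# which pulls PartialResultSet messages on demand via consume_next().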
class Unmergeable(ValueError):
"""Unable to merge two values.
:type lhs: :class:`google.protobuf.struct_pb2.Value`
:param lhs: pending value to be merged
:type rhs: :class:`google.protobuf.struct_pb2.Value`
:param rhs: remaining value to be merged
:type type_: :class:`google.cloud.proto.spanner.v1.type_pb2.Type`
:param type_: field type of values being merged
"""
def __init__(self, lhs, rhs, type_):
message = "Cannot merge %s values: %s %s" % (
type_pb2.TypeCode.Name(type_.code), lhs, rhs)
super(Unmergeable, self).__init__(message)
def _unmergeable(lhs, rhs, type_):
"""Helper for '_merge_by_type'."""
raise Unmergeable(lhs, rhs, type_)
def _merge_float64(lhs, rhs, type_): # pylint: disable=unused-argument
"""Helper for '_merge_by_type'."""
lhs_kind = lhs.WhichOneof('kind')
if lhs_kind == 'string_value':
return Value(string_value=lhs.string_value + rhs.string_value)
rhs_kind = rhs.WhichOneof('kind')
array_continuation = (
lhs_kind == 'number_value' and
rhs_kind == 'string_value' and
rhs.string_value == '')
if array_continuation:
return lhs
raise Unmergeable(lhs, rhs, type_)
def _merge_string(lhs, rhs, type_): # pylint: disable=unused-argument
"""Helper for '_merge_by_type'."""
return Value(string_value=lhs.string_value + rhs.string_value)
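# For example, a STRING value chunked across two partial result sets as
# Value(string_value='Hel') and Value(string_value='lo') is recombined by
# _merge_string above into Value(string_value='Hello').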
_UNMERGEABLE_TYPES = (type_pb2.BOOL,)
def _merge_array(lhs, rhs, type_):
"""Helper for '_merge_by_type'."""
element_type = type_.array_element_type
if element_type.code in _UNMERGEABLE_TYPES:
# Individual values cannot be merged, just concatenate
lhs.list_value.values.extend(rhs.list_value.values)
return lhs
lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)
first = rhs.pop(0)
if first.HasField('null_value'): # can't merge
lhs.append(first)
else:
last = lhs.pop()
try:
merged = _merge_by_type(last, first, element_type)
except Unmergeable:
lhs.append(last)
lhs.append(first)
else:
lhs.append(merged)
return Value(list_value=ListValue(values=(lhs + rhs)))
def _merge_struct(lhs, rhs, type_):
"""Helper for '_merge_by_type'."""
fields = type_.struct_type.fields
lhs, rhs = list(lhs.list_value.values), list(rhs.list_value.values)
candidate_type = fields[len(lhs) - 1].type
first = rhs.pop(0)
if (first.HasField('null_value') or
candidate_type.code in _UNMERGEABLE_TYPES):
lhs.append(first)
else:
last = lhs.pop()
lhs.append(_merge_by_type(last, first, candidate_type))
return Value(list_value=ListValue(values=lhs + rhs))
_MERGE_BY_TYPE = {
type_pb2.BOOL: _unmergeable,
type_pb2.INT64: _merge_string,
type_pb2.FLOAT64: _merge_float64,
type_pb2.STRING: _merge_string,
type_pb2.ARRAY: _merge_array,
type_pb2.STRUCT: _merge_struct,
}
def _merge_by_type(lhs, rhs, type_):
"""Helper for '_merge_chunk'."""
merger = _MERGE_BY_TYPE[type_.code]
return merger(lhs, rhs, type_)
| apache-2.0 |
ajoaoff/django | tests/admin_filters/models.py | 98 | 2149 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
year = models.PositiveIntegerField(null=True, blank=True)
author = models.ForeignKey(
User,
models.SET_NULL,
verbose_name="Verbose Author",
related_name='books_authored',
blank=True, null=True,
)
contributors = models.ManyToManyField(
User,
verbose_name="Verbose Contributors",
related_name='books_contributed',
blank=True,
)
is_best_seller = models.NullBooleanField(default=0)
date_registered = models.DateField(null=True)
# This field name is intentionally 2 characters long (#16080).
no = models.IntegerField(verbose_name='number', blank=True, null=True)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Department(models.Model):
code = models.CharField(max_length=4, unique=True)
description = models.CharField(max_length=50, blank=True, null=True)
def __str__(self):
return self.description
@python_2_unicode_compatible
class Employee(models.Model):
department = models.ForeignKey(Department, models.CASCADE, to_field="code")
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@python_2_unicode_compatible
class TaggedItem(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='tagged_items')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
url = models.URLField()
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.url
| bsd-3-clause |
kingvuplus/ts-gui-3 | lib/python/Plugins/Extensions/DVDBurn/DVDTitle.py | 24 | 6506 | from Components.config import config, ConfigSubsection, ConfigSubList, ConfigInteger, ConfigText, ConfigSelection, getConfigListEntry, ConfigSequence, ConfigYesNo
import TitleCutter
class ConfigFixedText(ConfigText):
def __init__(self, text, visible_width=60):
ConfigText.__init__(self, default = text, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
class DVDTitle:
def __init__(self, project):
self.properties = ConfigSubsection()
self.properties.menutitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.menusubtitle = ConfigText(fixed_size = False, visible_width = 80)
self.properties.aspect = ConfigSelection(choices = [("4:3", _("4:3")), ("16:9", _("16:9"))])
self.properties.widescreen = ConfigSelection(choices = [("nopanscan", "nopanscan"), ("noletterbox", "noletterbox")])
self.properties.autochapter = ConfigInteger(default = 0, limits = (0, 60))
self.properties.audiotracks = ConfigSubList()
self.DVBname = _("Title")
self.DVBdescr = _("Description")
self.DVBchannel = _("Channel")
self.cuesheet = [ ]
self.source = None
self.filesize = 0
self.estimatedDiskspace = 0
self.inputfile = ""
self.cutlist = [ ]
self.chaptermarks = [ ]
self.timeCreate = None
self.VideoType = -1
self.project = project
self.length = 0
def addService(self, service):
from os import path
from enigma import eServiceCenter, iServiceInformation
from ServiceReference import ServiceReference
from time import localtime, time
self.source = service
serviceHandler = eServiceCenter.getInstance()
info = serviceHandler.info(service)
sDescr = info and info.getInfoString(service, iServiceInformation.sDescription) or ""
self.DVBdescr = sDescr
sTimeCreate = info.getInfo(service, iServiceInformation.sTimeCreate)
if sTimeCreate > 1:
self.timeCreate = localtime(sTimeCreate)
serviceref = ServiceReference(info.getInfoString(service, iServiceInformation.sServiceref))
name = info and info.getName(service) or "Title" + sDescr
self.DVBname = name
self.DVBchannel = serviceref.getServiceName()
self.inputfile = service.getPath()
self.filesize = path.getsize(self.inputfile)
self.estimatedDiskspace = self.filesize
self.length = info.getLength(service)
def addFile(self, filename):
from enigma import eServiceReference
ref = eServiceReference(1, 0, filename)
self.addService(ref)
self.project.session.openWithCallback(self.titleEditDone, TitleCutter.CutlistReader, self)
def titleEditDone(self, cutlist):
self.initDVDmenuText(len(self.project.titles))
self.cuesheet = cutlist
self.produceFinalCuesheet()
def initDVDmenuText(self, track):
s = self.project.menutemplate.settings
self.properties.menutitle.setValue(self.formatDVDmenuText(s.titleformat.getValue(), track))
self.properties.menusubtitle.setValue(self.formatDVDmenuText(s.subtitleformat.getValue(), track))
def formatDVDmenuText(self, template, track):
template = template.replace("$i", str(track))
template = template.replace("$t", self.DVBname)
template = template.replace("$d", self.DVBdescr)
template = template.replace("$c", str(len(self.chaptermarks)+1))
template = template.replace("$f", self.inputfile)
template = template.replace("$C", self.DVBchannel)
#if template.find("$A") >= 0:
from TitleProperties import languageChoices
audiolist = [ ]
for audiotrack in self.properties.audiotracks:
active = audiotrack.active.getValue()
if active:
trackstring = audiotrack.format.getValue()
language = audiotrack.language.getValue()
if languageChoices.langdict.has_key(language):
trackstring += ' (' + languageChoices.langdict[language] + ')'
audiolist.append(trackstring)
audiostring = ', '.join(audiolist)
template = template.replace("$A", audiostring)
if template.find("$l") >= 0:
l = self.length
lengthstring = "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
template = template.replace("$l", lengthstring)
if self.timeCreate:
template = template.replace("$Y", str(self.timeCreate[0]))
template = template.replace("$M", str(self.timeCreate[1]))
template = template.replace("$D", str(self.timeCreate[2]))
timestring = "%d:%02d" % (self.timeCreate[3], self.timeCreate[4])
template = template.replace("$T", timestring)
else:
template = template.replace("$Y", "").replace("$M", "").replace("$D", "").replace("$T", "")
return template
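    # Illustrative example (values invented): with the template "$i. $t ($C)",
    # a third recording named "News" from channel "Das Erste" would be
    # formatted as "3. News (Das Erste)".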
def produceFinalCuesheet(self):
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
accumulated_in = 0
accumulated_at = 0
last_in = 0
self.cutlist = [ ]
self.chaptermarks = [ ]
# our demuxer expects *strictly* IN,OUT lists.
currently_in = not any(type == CUT_TYPE_IN for pts, type in self.cuesheet)
if currently_in:
self.cutlist.append(0) # emulate "in" at first
for (pts, type) in self.cuesheet:
#print "pts=", pts, "type=", type, "accumulated_in=", accumulated_in, "accumulated_at=", accumulated_at, "last_in=", last_in
if type == CUT_TYPE_IN and not currently_in:
self.cutlist.append(pts)
last_in = pts
currently_in = True
if type == CUT_TYPE_OUT and currently_in:
self.cutlist.append(pts)
# accumulate the segment
accumulated_in += pts - last_in
accumulated_at = pts
currently_in = False
if type == CUT_TYPE_MARK and currently_in:
# relocate chaptermark against "in" time. This is not 100% accurate,
# as the in/out points are not.
reloc_pts = pts - last_in + accumulated_in
self.chaptermarks.append(reloc_pts)
if len(self.cutlist) > 1:
part = accumulated_in / (self.length*90000.0)
usedsize = int ( part * self.filesize )
self.estimatedDiskspace = usedsize
self.length = accumulated_in / 90000
def getChapterMarks(self, template="$h:$m:$s.$t"):
timestamps = [ ]
chapters = [ ]
minutes = self.properties.autochapter.getValue()
if len(self.chaptermarks) < 1 and minutes > 0:
chapterpts = 0
while chapterpts < (self.length-60*minutes)*90000:
chapterpts += 90000 * 60 * minutes
chapters.append(chapterpts)
else:
chapters = self.chaptermarks
for p in chapters:
timestring = template.replace("$h", str(p / (90000 * 3600)))
timestring = timestring.replace("$m", ("%02d" % (p % (90000 * 3600) / (90000 * 60))))
timestring = timestring.replace("$s", ("%02d" % (p % (90000 * 60) / 90000)))
timestring = timestring.replace("$t", ("%03d" % ((p % 90000) / 90)))
timestamps.append(timestring)
return timestamps
| gpl-2.0 |
ademmers/ansible | test/integration/targets/module_precedence/multiple_roles/bar/library/ping.py | 35 | 2234 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
C(/usr/bin/ansible) to verify the ability to login and that a usable python is configured.
- This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
# Test we can logon to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
data=dict(required=False, default=None),
),
supports_check_mode=True
)
result = dict(ping='pong')
if module.params['data']:
if module.params['data'] == 'crash':
raise Exception("boom")
result['ping'] = module.params['data']
result['location'] = 'role: bar'
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
newrocknj/horizon | openstack_dashboard/dashboards/admin/hypervisors/tables.py | 32 | 3125 | # Copyright 2013 B1 Systems GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.templatetags import sizeformat
class AdminHypervisorsTable(tables.DataTable):
hostname = tables.Column("hypervisor_hostname",
link="horizon:admin:hypervisors:detail",
attrs={'data-type': 'naturalSort'},
verbose_name=_("Hostname"))
hypervisor_type = tables.Column("hypervisor_type",
verbose_name=_("Type"))
vcpus_used = tables.Column("vcpus_used",
verbose_name=_("VCPUs (used)"))
vcpus = tables.Column("vcpus",
verbose_name=_("VCPUs (total)"))
memory_used = tables.Column('memory_mb_used',
verbose_name=_("RAM (used)"),
attrs={'data-type': 'size'},
filters=(sizeformat.mb_float_format,))
memory = tables.Column('memory_mb',
verbose_name=_("RAM (total)"),
attrs={'data-type': 'size'},
filters=(sizeformat.mb_float_format,))
local_used = tables.Column('local_gb_used',
verbose_name=_("Local Storage (used)"),
attrs={'data-type': 'size'},
filters=(sizeformat.diskgbformat,))
local = tables.Column('local_gb',
verbose_name=_("Local Storage (total)"),
attrs={'data-type': 'size'},
filters=(sizeformat.diskgbformat,))
running_vms = tables.Column("running_vms",
verbose_name=_("Instances"))
def get_object_id(self, hypervisor):
return "%s_%s" % (hypervisor.id,
hypervisor.hypervisor_hostname)
class Meta(object):
name = "hypervisors"
verbose_name = _("Hypervisors")
class AdminHypervisorInstancesTable(tables.DataTable):
name = tables.Column("name",
link="horizon:admin:instances:detail",
verbose_name=_("Instance Name"))
instance_id = tables.Column("uuid",
verbose_name=_("Instance ID"))
def get_object_id(self, server):
return server['uuid']
class Meta(object):
name = "hypervisor_instances"
verbose_name = _("Hypervisor Instances")
| apache-2.0 |
wehkamp/ansible-modules-core | system/setup.py | 64 | 5222 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: setup
version_added: historical
short_description: Gathers facts about remote hosts
options:
filter:
version_added: "1.1"
description:
- if supplied, only return facts that match this shell-style (fnmatch) wildcard.
required: false
default: '*'
fact_path:
version_added: "1.3"
description:
- path used for local ansible facts (*.fact) - files in this dir
will be run (if executable) and their results be added to ansible_local facts
if a file is not executable it is read.
File/results format can be json or ini-format
required: false
default: '/etc/ansible/facts.d'
description:
- This module is automatically called by playbooks to gather useful
variables about remote hosts that can be used in playbooks. It can also be
executed directly by C(/usr/bin/ansible) to check what variables are
available to a host. Ansible provides many I(facts) about the system,
automatically.
notes:
- More ansible facts will be added with successive releases. If I(facter) or
I(ohai) are installed, variables from these programs will also be snapshotted
into the JSON file for usage in templating. These variables are prefixed
with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
bubbled up to the caller. Using the ansible facts and choosing to not
install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your
remote systems. (See also M(facter) and M(ohai).)
- The filter option filters only the first level subkey below ansible_facts.
- If the target host is Windows, you will not currently have the ability to use
C(fact_path) or C(filter) as this is provided by a simpler implementation of the module.
Different facts are returned for Windows hosts.
author: Michael DeHaan
'''
EXAMPLES = """
# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
ansible all -m setup --tree /tmp/facts
# Display only facts regarding memory found by ansible on all hosts and output them.
ansible all -m setup -a 'filter=ansible_*_mb'
# Display only facts returned by facter.
ansible all -m setup -a 'filter=facter_*'
# Display only facts about certain interfaces.
ansible all -m setup -a 'filter=ansible_eth[0-2]'
"""
def run_setup(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the path to the facter and ohai binary and set
# the variable to that path.
facter_path = module.get_bin_path('facter')
ohai_path = module.get_bin_path('ohai')
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --puppet --json")
facter = True
try:
facter_ds = json.loads(out)
except:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['verbose_override'] = True
return setup_result
def main():
global module
module = AnsibleModule(
argument_spec = dict(
filter=dict(default="*", required=False),
fact_path=dict(default='/etc/ansible/facts.d', required=False),
),
supports_check_mode = True,
)
data = run_setup(module)
module.exit_json(**data)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
main()
| gpl-3.0 |
halberom/ansible | lib/ansible/plugins/lookup/file.py | 131 | 1954 | # (c) 2012, Daniel Hokka Zakrisson <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_text
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
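# Typical playbook usage (illustrative):
#
#   vars:
#     motd_contents: "{{ lookup('file', 'motd.txt') }}"
#
# The term is resolved against the role/play 'files' search path via
# find_file_in_search_path() in run() below.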
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
display.debug("File lookup term: %s" % term)
# Find the file in the expected search path
lookupfile = self.find_file_in_search_path(variables, 'files', term)
display.vvvv(u"File lookup using %s as file" % lookupfile)
try:
if lookupfile:
b_contents, show_data = self._loader._get_file_contents(lookupfile)
contents = to_text(b_contents, errors='surrogate_or_strict')
ret.append(contents.rstrip())
else:
raise AnsibleParserError()
except AnsibleParserError:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
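# Illustrative usage (assumed, not part of this plugin): in a playbook or
# template the plugin is reached through lookup(), e.g.
#
#   {{ lookup('file', 'foo.txt') }}
#
# which returns the contents of the first 'foo.txt' found on the standard
# 'files' search path, with trailing whitespace stripped as done in run().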
| gpl-3.0 |
detrout/debian-statsmodels | statsmodels/examples/ex_feasible_gls_het_0.py | 34 | 6454 | # -*- coding: utf-8 -*-
"""Examples for linear model with heteroscedasticity estimated by feasible GLS
These are examples to check the results during development.
The assumptions:
We have a linear model y = X*beta where the variance of an observation depends
on some explanatory variable Z (`exog_var`).
linear_model.WLS estimated the model for a given weight matrix
here we want to estimate also the weight matrix by two step or iterative WLS
Created on Wed Dec 21 12:28:17 2011
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.regression.feasible_gls import GLSHet, GLSHet2
from statsmodels.tools.tools import add_constant
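# Rough sketch of the data generating process used in the example below
# (added for orientation; symbols refer to the variables defined later):
#
#   y_i = x_i' beta + e_i,  Var(e_i) = sig**2 * (z_i' gamma)
#   WLS weight_i proportional to 1 / (z_i' gamma)   # winv, weights_dgp below
#
# GLSHet roughly alternates between estimating beta by WLS and estimating
# the variance function from a regression of squared residuals on Z
# (see results_residual_regression further down).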
examples = ['ex1']
if 'ex1' in examples:
nsample = 300 # the last graph shows a different pattern with 100, 200 or 500
sig = 0.5
np.random.seed(9876789) #9876543)
X = np.random.randn(nsample, 3)
X = np.column_stack((np.ones((nsample,1)), X))
beta = [1, 0.5, -0.5, 1.]
y_true2 = np.dot(X, beta)
x1 = np.linspace(0, 1, nsample)
gamma = np.array([1, 3.])
# with slope 3 instead of 2, I get negative weights - not correct
# - was misspecified, but negative weights are still possible with the identity link
#gamma /= gamma.sum() #normalize assuming x1.max is 1
z_true = add_constant(x1)
winv = np.dot(z_true, gamma)
het_params = sig**2 * np.array([1, 3.]) # for squared
sig2_het = sig**2 * winv
weights_dgp = 1/winv
weights_dgp /= weights_dgp.max() # should already be normalized - normalization NOT checked
#y2[:nsample*6/10] = y_true2[:nsample*6/10] + sig*1. * np.random.normal(size=nsample*6/10)
z0 = np.zeros(nsample)
z0[(nsample * 5)//10:] = 1 # dummy for the 2 halves of the sample
z0 = add_constant(z0)
z1 = add_constant(x1)
noise = np.sqrt(sig2_het) * np.random.normal(size=nsample)
y2 = y_true2 + noise
X2 = X[:,[0,2]] #misspecified, missing regressor in main equation
X2 = X # correctly specified
res_ols = OLS(y2, X2).fit()
print('OLS beta estimates')
print(res_ols.params)
print('OLS stddev of beta')
print(res_ols.bse)
print('\nWLS')
mod0 = GLSHet2(y2, X2, exog_var=winv)
res0 = mod0.fit()
print('new version')
mod1 = GLSHet(y2, X2, exog_var=winv)
res1 = mod1.iterative_fit(2)
print('WLS beta estimates')
print(res1.params)
print(res0.params)
print('WLS stddev of beta')
print(res1.bse)
#compare with previous version GLSHet2, refactoring check
#assert_almost_equal(res1.params, np.array([ 0.37642521, 1.51447662]))
#this fails ??? more iterations? different starting weights?
print(res1.model.weights/res1.model.weights.max())
#why is the error so small in the estimated weights ?
assert_almost_equal(res1.model.weights/res1.model.weights.max(), weights_dgp, 14)
print('residual regression params')
print(res1.results_residual_regression.params)
print('scale of model ?')
print(res1.scale)
print('unweighted residual variance, note unweighted mean is not zero')
print(res1.resid.var())
#Note weighted mean is zero:
#(res1.model.weights * res1.resid).mean()
doplots = True #False
if doplots:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, 'r-', label='fwls')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
#the next only works if w has finite support, discrete/categorical
#z = (w[:,None] == [1,4]).astype(float) #dummy variable
#z = (w0[:,None] == np.unique(w0)).astype(float) #dummy variable
#changed z0 contains dummy and constant
mod2 = GLSHet(y2, X2, exog_var=z0)
res2 = mod2.iterative_fit(3)
print(res2.params)
import statsmodels.api as sm
#z = sm.add_constant(w, prepend=True)
z = sm.add_constant(x1/x1.max())
mod3 = GLSHet(y2, X2, exog_var=z1)#, link=sm.families.links.log())
res3 = mod3.iterative_fit(20)
error_var_3 = res3.mse_resid/res3.model.weights
print(res3.params)
print("np.array(res3.model.history['ols_params'])")
print(np.array(res3.model.history['ols_params']))
print("np.array(res3.model.history['self_params'])")
print(np.array(res3.model.history['self_params']))
#Models 2 and 3 are equivalent with different parameterization of Z
print(np.unique(res2.model.weights)) #for discrete z only, only a few uniques
print(np.unique(res3.model.weights))
print(res3.summary())
print('\n\nResults of estimation of weights')
print('--------------------------------')
print(res3.results_residual_regression.summary())
if doplots:
plt.figure()
plt.plot(x1, y2, 'o')
plt.plot(x1, y_true2, 'b-', label='true')
plt.plot(x1, res1.fittedvalues, '-', label='fwls1')
plt.plot(x1, res2.fittedvalues, '-', label='fwls2')
plt.plot(x1, res3.fittedvalues, '-', label='fwls3')
plt.plot(x1, res_ols.fittedvalues, '--', label='ols')
plt.legend()
plt.figure()
plt.ylim(0, 5)
res_e2 = OLS(noise**2, z).fit()
plt.plot(noise**2, 'bo', alpha=0.5, label='dgp error**2')
plt.plot(res_e2.fittedvalues, lw=2, label='ols for noise**2')
#plt.plot(res3.model.weights, label='GLSHet weights')
plt.plot(error_var_3, lw=2, label='GLSHet error var')
plt.plot(res3.resid**2, 'ro', alpha=0.5, label='resid squared')
#plt.plot(weights_dgp, label='DGP weights')
plt.plot(sig**2 * winv, lw=2, label='DGP error var')
plt.legend()
plt.show()
'''Note these are close but maybe biased because of skewed distribution
>>> res3.mse_resid/res3.model.weights[-10:]
array([ 1.03115871, 1.03268209, 1.03420547, 1.03572885, 1.03725223,
1.03877561, 1.04029899, 1.04182237, 1.04334575, 1.04486913])
>>> res_e2.fittedvalues[-10:]
array([ 1.0401953 , 1.04171386, 1.04323242, 1.04475098, 1.04626954,
1.0477881 , 1.04930666, 1.05082521, 1.05234377, 1.05386233])
>>> sig**2 * w[-10:]
array([ 0.98647295, 0.98797595, 0.98947896, 0.99098196, 0.99248497,
0.99398798, 0.99549098, 0.99699399, 0.99849699, 1. ])
'''
| bsd-3-clause |
mrshu/iepy | iepy/instantiation/iepy_runner.py | 1 | 6152 | """
Run IEPY active-learning extractor
Usage:
iepy_runner.py [options] <relation_name> <output>
iepy_runner.py [options] --db-store <relation_name>
iepy_runner.py -h | --help | --version
Options:
--store-extractor=<extractor_output> Stores the trained classifier
--trained-extractor=<extractor_path> Load an already trained extractor
--db-store Stores the predictions on the database
--no-questions Won't generate questions to answer. Will predict
as is. Should be used with --trained-extractor
--tune-for=<tune-for> Predictions tuning. Options are high-prec
or high-recall [default: high-prec]
--extractor-config=<config.json> Sets the extractor config
--version Version number
-h --help Show this screen
"""
import os
import json
import logging
from docopt import docopt
from sys import exit
import iepy
INSTANCE_PATH = iepy.setup(__file__)
from iepy.extraction.active_learning_core import ActiveLearningCore, HIPREC, HIREC
from iepy.data.db import CandidateEvidenceManager
from iepy.data.models import Relation
from iepy.extraction.terminal import TerminalAdministration
from iepy.data import output
def print_all_relations():
print("All available relations:")
for relation in Relation.objects.all():
print(" {}".format(relation))
def load_labeled_evidences(relation, evidences):
CEM = CandidateEvidenceManager # shortcut
return CEM.labels_for(relation, evidences, CEM.conflict_resolution_newest_wins)
def _get_tuning_mode(opts):
if opts['--tune-for'] == 'high-prec':
tuning_mode = HIPREC
elif opts['--tune-for'] == 'high-recall':
tuning_mode = HIREC
else:
print ('Invalid tuning mode')
print (__doc__)
exit(1)
return tuning_mode
def _get_relation(opts):
relation_name = opts['<relation_name>']
try:
relation = Relation.objects.get(name=relation_name)
except Relation.DoesNotExist:
print("Relation {!r} non existent".format(relation_name))
print_all_relations()
exit(1)
return relation
def _load_extractor(opts, relation, labeled_evidences):
extractor_path = opts.get('--trained-extractor')
try:
iextractor = ActiveLearningCore.load(extractor_path,
labeled_evidences=labeled_evidences)
except ValueError:
print("Error: unable to load extractor, invalid file")
exit(1)
if iextractor.relation != relation:
print('The loaded extractor is not for the requested relation'
' but for relation {} instead'.format(iextractor.relation))
exit(1)
print('Extractor successfully loaded')
return iextractor
def _construct_extractor(opts, relation, labeled_evidences, tuning_mode):
config_filepath = opts.get("--extractor-config")
if not config_filepath:
config_filepath = os.path.join(INSTANCE_PATH, "extractor_config.json")
if not os.path.exists(config_filepath):
print("Error: extractor config does not exist, please create the "
"file extractor_config.json or use the --extractor-config")
exit(1)
with open(config_filepath) as filehandler:
try:
extractor_config = json.load(filehandler)
except Exception as error:
print("Error: unable to load extractor config: {}".format(error))
exit(1)
iextractor = ActiveLearningCore(
relation, labeled_evidences, extractor_config, tradeoff=tuning_mode
)
return iextractor
def run_from_command_line():
opts = docopt(__doc__, version=iepy.__version__)
logging.basicConfig(level=logging.INFO, format='%(message)s')
logging.getLogger("featureforge").setLevel(logging.WARN)
tuning_mode = _get_tuning_mode(opts)
relation = _get_relation(opts)
candidates = CandidateEvidenceManager.candidates_for_relation(relation)
labeled_evidences = load_labeled_evidences(relation, candidates)
if opts.get('--trained-extractor'):
iextractor = _load_extractor(opts, relation, labeled_evidences)
was_ever_trained = True
opts["--no-questions"] = True
else:
iextractor = _construct_extractor(opts, relation, labeled_evidences, tuning_mode)
iextractor.start()
was_ever_trained = False
if not opts.get("--no-questions", False):
questions_loop(iextractor, relation, was_ever_trained)
# Predict and store output
predictions = iextractor.predict(candidates) # asking predictions for EVERYTHING
if not predictions:
print("Nothing was predicted")
exit(1)
if opts.get("--db-store"):
output.dump_predictions_to_database(relation, predictions)
output_file = opts.get("<output>")
if output_file:
output.dump_runner_output_to_csv(predictions, output_file)
classifier_output = opts.get("--store-extractor")
if classifier_output:
iextractor.save(classifier_output)
def questions_loop(iextractor, relation, was_ever_trained):
STOP = u'STOP'
term = TerminalAdministration(
relation,
extra_options=[(STOP, u'Stop execution')]
)
while iextractor.questions:
questions = list(iextractor.questions) # copying the list
term.update_candidate_evidences_to_label(questions)
result = term()
i = 0
for c, label_value in load_labeled_evidences(relation, questions).items():
if label_value is not None:
iextractor.add_answer(c, label_value)
i += 1
print ('Added %s new human labels to the extractor core' % i)
iextractor.process()
was_ever_trained = True
if result == STOP:
break
if not was_ever_trained:
# It's needed to run some process before asking for predictions
iextractor.process()
if __name__ == u'__main__':
run_from_command_line()
| bsd-3-clause |
lctseng/NCTU-SDN-Project | openvswitch-2.3.0/python/ovstest/tests.py | 6 | 9327 | import math
import time
import ovstest.util as util
DEFAULT_TEST_BRIDGE = "ovstestbr0"
DEFAULT_TEST_PORT = "ovstestport0"
DEFAULT_TEST_TUN = "ovstestport1"
NO_HANDLE = -1
def do_udp_tests(receiver, sender, tbwidth, duration, port_sizes):
"""Schedule UDP tests between receiver and sender"""
server1 = util.rpc_client(receiver[0], receiver[1])
server2 = util.rpc_client(sender[0], sender[1])
udpformat = '{0:>15} {1:>15} {2:>15} {3:>15} {4:>15}'
print ("UDP test from %s:%u to %s:%u with target bandwidth %s" %
(sender[0], sender[1], receiver[0], receiver[1],
util.bandwidth_to_string(tbwidth)))
print udpformat.format("Datagram Size", "Snt Datagrams", "Rcv Datagrams",
"Datagram Loss", "Bandwidth")
for size in port_sizes:
listen_handle = NO_HANDLE
send_handle = NO_HANDLE
try:
packetcnt = (tbwidth * duration) / size
listen_handle = server1.create_udp_listener(receiver[3])
if listen_handle == NO_HANDLE:
print ("Server could not open UDP listening socket on port"
" %u. Try to restart the server.\n" % receiver[3])
return
send_handle = server2.create_udp_sender(
(util.ip_from_cidr(receiver[2]),
receiver[3]), packetcnt, size,
duration)
# Using sleep here because there is no other synchronization
# source that would notify us when all sent packets were received
time.sleep(duration + 1)
rcv_packets = server1.get_udp_listener_results(listen_handle)
snt_packets = server2.get_udp_sender_results(send_handle)
loss = math.ceil(((snt_packets - rcv_packets) * 10000.0) /
snt_packets) / 100
bwidth = (rcv_packets * size) / duration
print udpformat.format(size, snt_packets, rcv_packets,
'%.2f%%' % loss, util.bandwidth_to_string(bwidth))
finally:
if listen_handle != NO_HANDLE:
server1.close_udp_listener(listen_handle)
if send_handle != NO_HANDLE:
server2.close_udp_sender(send_handle)
print "\n"
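# Worked example (hypothetical numbers, not produced by this code): with
# snt_packets = 1000, rcv_packets = 990, size = 1500 and duration = 5 the
# formulas above give
#   loss = ceil((1000 - 990) * 10000.0 / 1000) / 100 = 1.0  (percent)
#   bwidth = 990 * 1500 / 5 = 297000  (bytes per second)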
def do_tcp_tests(receiver, sender, duration):
"""Schedule TCP tests between receiver and sender"""
server1 = util.rpc_client(receiver[0], receiver[1])
server2 = util.rpc_client(sender[0], sender[1])
tcpformat = '{0:>15} {1:>15} {2:>15}'
print "TCP test from %s:%u to %s:%u (full speed)" % (sender[0], sender[1],
receiver[0], receiver[1])
print tcpformat.format("Snt Bytes", "Rcv Bytes", "Bandwidth")
listen_handle = NO_HANDLE
send_handle = NO_HANDLE
try:
listen_handle = server1.create_tcp_listener(receiver[3])
if listen_handle == NO_HANDLE:
print ("Server was unable to open TCP listening socket on port"
" %u. Try to restart the server.\n" % receiver[3])
return
send_handle = server2.create_tcp_sender(util.ip_from_cidr(receiver[2]),
receiver[3], duration)
time.sleep(duration + 1)
rcv_bytes = long(server1.get_tcp_listener_results(listen_handle))
snt_bytes = long(server2.get_tcp_sender_results(send_handle))
bwidth = rcv_bytes / duration
print tcpformat.format(snt_bytes, rcv_bytes,
util.bandwidth_to_string(bwidth))
finally:
if listen_handle != NO_HANDLE:
server1.close_tcp_listener(listen_handle)
if send_handle != NO_HANDLE:
server2.close_tcp_sender(send_handle)
print "\n"
def do_l3_tests(node1, node2, bandwidth, duration, ps, type):
"""
Do L3 tunneling tests. Each node is given as a 4-tuple - physical
interface IP, control port, test IP and test port.
"""
server1 = util.rpc_client(node1[0], node1[1])
server2 = util.rpc_client(node2[0], node2[1])
servers_with_bridges = []
try:
server1.create_bridge(DEFAULT_TEST_BRIDGE)
servers_with_bridges.append(server1)
server2.create_bridge(DEFAULT_TEST_BRIDGE)
servers_with_bridges.append(server2)
server1.interface_up(DEFAULT_TEST_BRIDGE)
server2.interface_up(DEFAULT_TEST_BRIDGE)
server1.interface_assign_ip(DEFAULT_TEST_BRIDGE, node1[2], None)
server2.interface_assign_ip(DEFAULT_TEST_BRIDGE, node2[2], None)
server1.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_TUN)
server2.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_TUN)
server1.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "type",
None, type)
server2.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "type",
None, type)
server1.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "options",
"remote_ip", node2[0])
server2.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "options",
"remote_ip", node1[0])
do_udp_tests(node1, node2, bandwidth, duration, ps)
do_udp_tests(node2, node1, bandwidth, duration, ps)
do_tcp_tests(node1, node2, duration)
do_tcp_tests(node2, node1, duration)
finally:
for server in servers_with_bridges:
server.del_bridge(DEFAULT_TEST_BRIDGE)
def do_vlan_tests(node1, node2, bandwidth, duration, ps, tag):
"""
Do VLAN tests between node1 and node2. Each node is given
as a 4-tuple - physical interface IP, control port, test IP and
test port.
"""
server1 = util.rpc_client(node1[0], node1[1])
server2 = util.rpc_client(node2[0], node2[1])
br_name1 = None
br_name2 = None
servers_with_test_ports = []
try:
interface_node1 = server1.get_interface(node1[0])
interface_node2 = server2.get_interface(node2[0])
if server1.is_ovs_bridge(interface_node1):
br_name1 = interface_node1
else:
br_name1 = DEFAULT_TEST_BRIDGE
server1.create_test_bridge(br_name1, interface_node1)
if server2.is_ovs_bridge(interface_node2):
br_name2 = interface_node2
else:
br_name2 = DEFAULT_TEST_BRIDGE
server2.create_test_bridge(br_name2, interface_node2)
server1.add_port_to_bridge(br_name1, DEFAULT_TEST_PORT)
servers_with_test_ports.append(server1)
server2.add_port_to_bridge(br_name2, DEFAULT_TEST_PORT)
servers_with_test_ports.append(server2)
server1.ovs_vsctl_set("Port", DEFAULT_TEST_PORT, "tag", None, tag)
server2.ovs_vsctl_set("Port", DEFAULT_TEST_PORT, "tag", None, tag)
server1.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type", None,
"internal")
server2.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type", None,
"internal")
server1.interface_assign_ip(DEFAULT_TEST_PORT, node1[2], None)
server2.interface_assign_ip(DEFAULT_TEST_PORT, node2[2], None)
server1.interface_up(DEFAULT_TEST_PORT)
server2.interface_up(DEFAULT_TEST_PORT)
do_udp_tests(node1, node2, bandwidth, duration, ps)
do_udp_tests(node2, node1, bandwidth, duration, ps)
do_tcp_tests(node1, node2, duration)
do_tcp_tests(node2, node1, duration)
finally:
for server in servers_with_test_ports:
server.del_port_from_bridge(DEFAULT_TEST_PORT)
if br_name1 == DEFAULT_TEST_BRIDGE:
server1.del_test_bridge(br_name1, interface_node1)
if br_name2 == DEFAULT_TEST_BRIDGE:
server2.del_test_bridge(br_name2, interface_node2)
def do_direct_tests(node1, node2, bandwidth, duration, ps):
"""
Do tests between outer IPs without involving Open vSwitch. Each
node is given as a 4-tuple - physical interface IP, control port,
test IP and test port. Direct tests will use the physical interface
IP as the test IP address.
"""
n1 = (node1[0], node1[1], node1[0], node1[3])
n2 = (node2[0], node2[1], node2[0], node2[3])
do_udp_tests(n1, n2, bandwidth, duration, ps)
do_udp_tests(n2, n1, bandwidth, duration, ps)
do_tcp_tests(n1, n2, duration)
do_tcp_tests(n2, n1, duration)
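# Illustrative node tuples (hypothetical addresses and ports, shown only to
# clarify the 4-tuple layout expected by the functions above):
#   node1 = ('10.1.1.1', 15531, '10.254.254.1/24', 15532)
#   node2 = ('10.1.1.2', 15531, '10.254.254.2/24', 15532)
# i.e. (physical interface IP, control port, test IP in CIDR form, test port).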
def configure_l3(conf, tunnel_mode):
"""
This function creates a temporary test bridge and adds an L3 tunnel.
"""
s = util.start_local_server(conf[1][1])
server = util.rpc_client("127.0.0.1", conf[1][1])
server.create_bridge(DEFAULT_TEST_BRIDGE)
server.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_PORT)
server.interface_up(DEFAULT_TEST_BRIDGE)
server.interface_assign_ip(DEFAULT_TEST_BRIDGE, conf[1][0],
None)
server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type",
None, tunnel_mode)
server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "options",
"remote_ip", conf[0])
return s
| apache-2.0 |
varunarya10/nova_test_latest | nova/api/metadata/base.py | 20 | 20244 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Instance Metadata information."""
import base64
import os
import posixpath
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.api.metadata import password
from nova import availability_zones as az
from nova import block_device
from nova import context
from nova import network
from nova import objects
from nova.objects import keypair as keypair_obj
from nova import utils
from nova.virt import netutils
metadata_opts = [
cfg.StrOpt('config_drive_skip_versions',
default=('1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 '
'2007-12-15 2008-02-01 2008-09-01'),
help='List of metadata versions to skip placing into the '
'config drive'),
cfg.StrOpt('vendordata_driver',
default='nova.api.metadata.vendordata_json.JsonFileVendorData',
help='Driver to use for vendor data'),
]
CONF = cfg.CONF
CONF.register_opts(metadata_opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
VERSIONS = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
FOLSOM = '2012-08-10'
GRIZZLY = '2013-04-04'
HAVANA = '2013-10-17'
LIBERTY = '2015-10-15'
OPENSTACK_VERSIONS = [
FOLSOM,
GRIZZLY,
HAVANA,
LIBERTY,
]
VERSION = "version"
CONTENT = "content"
CONTENT_DIR = "content"
MD_JSON_NAME = "meta_data.json"
VD_JSON_NAME = "vendor_data.json"
NW_JSON_NAME = "network_data.json"
UD_NAME = "user_data"
PASS_NAME = "password"
MIME_TYPE_TEXT_PLAIN = "text/plain"
MIME_TYPE_APPLICATION_JSON = "application/json"
LOG = logging.getLogger(__name__)
class InvalidMetadataVersion(Exception):
pass
class InvalidMetadataPath(Exception):
pass
class InstanceMetadata(object):
"""Instance metadata."""
def __init__(self, instance, address=None, content=None, extra_md=None,
network_info=None, vd_driver=None, network_metadata=None):
"""Creation of this object should basically cover all time consuming
collection. Methods after that should not cause time delays due to
network operations or lengthy cpu operations.
The user should then get a single instance and make multiple method
calls on it.
"""
if not content:
content = []
ctxt = context.get_admin_context()
# The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
self.instance = instance
self.extra_md = extra_md
self.availability_zone = az.get_instance_availability_zone(ctxt,
instance)
self.security_groups = objects.SecurityGroupList.get_by_instance(
ctxt, instance)
self.mappings = _format_instance_mapping(ctxt, instance)
if instance.user_data is not None:
self.userdata_raw = base64.b64decode(instance.user_data)
else:
self.userdata_raw = None
self.address = address
# expose instance metadata.
self.launch_metadata = utils.instance_meta(instance)
self.password = password.extract_password(instance)
self.uuid = instance.uuid
self.content = {}
self.files = []
# get network info, and the rendered network template
if network_info is None:
network_info = instance.info_cache.network_info
# expose network metadata
if network_metadata is None:
self.network_metadata = netutils.get_network_metadata(network_info)
else:
self.network_metadata = network_metadata
self.ip_info = \
ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
self.network_config = None
cfg = netutils.get_injected_network_template(network_info)
if cfg:
key = "%04i" % len(self.content)
self.content[key] = cfg
self.network_config = {"name": "network_config",
'content_path': "/%s/%s" % (CONTENT_DIR, key)}
# 'content' is passed in from the configdrive code in
# nova/virt/libvirt/driver.py. That's how we get the injected files
# (personalities) in. AFAIK they're not stored in the db at all,
# so are not available later (web service metadata time).
for (path, contents) in content:
key = "%04i" % len(self.content)
self.files.append({'path': path,
'content_path': "/%s/%s" % (CONTENT_DIR, key)})
self.content[key] = contents
if vd_driver is None:
vdclass = importutils.import_class(CONF.vendordata_driver)
else:
vdclass = vd_driver
self.vddriver = vdclass(instance=instance, address=address,
extra_md=extra_md, network_info=network_info)
self.route_configuration = None
def _route_configuration(self):
if self.route_configuration:
return self.route_configuration
path_handlers = {UD_NAME: self._user_data,
PASS_NAME: self._password,
VD_JSON_NAME: self._vendor_data,
MD_JSON_NAME: self._metadata_as_json,
NW_JSON_NAME: self._network_data,
VERSION: self._handle_version,
CONTENT: self._handle_content}
self.route_configuration = RouteConfiguration(path_handlers)
return self.route_configuration
def set_mimetype(self, mime_type):
self.md_mimetype = mime_type
def get_mimetype(self):
return self.md_mimetype
def get_ec2_metadata(self, version):
if version == "latest":
version = VERSIONS[-1]
if version not in VERSIONS:
raise InvalidMetadataVersion(version)
hostname = self._get_hostname()
floating_ips = self.ip_info['floating_ips']
floating_ip = floating_ips and floating_ips[0] or ''
fixed_ips = self.ip_info['fixed_ips']
fixed_ip = fixed_ips and fixed_ips[0] or ''
fmt_sgroups = [x['name'] for x in self.security_groups]
meta_data = {
'ami-id': self.instance.ec2_ids.ami_id,
'ami-launch-index': self.instance.launch_index,
'ami-manifest-path': 'FIXME',
'instance-id': self.instance.ec2_ids.instance_id,
'hostname': hostname,
'local-ipv4': fixed_ip or self.address,
'reservation-id': self.instance.reservation_id,
'security-groups': fmt_sgroups}
# public keys are strangely rendered in ec2 metadata service
# meta-data/public-keys/ returns '0=keyname' (with no trailing /)
# and only if there is a public key given.
# '0=keyname' means there is a normally rendered dict at
# meta-data/public-keys/0
#
# meta-data/public-keys/ : '0=%s' % keyname
# meta-data/public-keys/0/ : 'openssh-key'
# meta-data/public-keys/0/openssh-key : '%s' % publickey
if self.instance.key_name:
meta_data['public-keys'] = {
'0': {'_name': "0=" + self.instance.key_name,
'openssh-key': self.instance.key_data}}
if self._check_version('2007-01-19', version):
meta_data['local-hostname'] = hostname
meta_data['public-hostname'] = hostname
meta_data['public-ipv4'] = floating_ip
if False and self._check_version('2007-03-01', version):
# TODO(vish): store product codes
meta_data['product-codes'] = []
if self._check_version('2007-08-29', version):
instance_type = self.instance.get_flavor()
meta_data['instance-type'] = instance_type['name']
if False and self._check_version('2007-10-10', version):
# TODO(vish): store ancestor ids
meta_data['ancestor-ami-ids'] = []
if self._check_version('2007-12-15', version):
meta_data['block-device-mapping'] = self.mappings
if self.instance.ec2_ids.kernel_id:
meta_data['kernel-id'] = self.instance.ec2_ids.kernel_id
if self.instance.ec2_ids.ramdisk_id:
meta_data['ramdisk-id'] = self.instance.ec2_ids.ramdisk_id
if self._check_version('2008-02-01', version):
meta_data['placement'] = {'availability-zone':
self.availability_zone}
if self._check_version('2008-09-01', version):
meta_data['instance-action'] = 'none'
data = {'meta-data': meta_data}
if self.userdata_raw is not None:
data['user-data'] = self.userdata_raw
return data
def get_ec2_item(self, path_tokens):
# get_ec2_metadata returns dict without top level version
data = self.get_ec2_metadata(path_tokens[0])
return find_path_in_tree(data, path_tokens[1:])
def get_openstack_item(self, path_tokens):
if path_tokens[0] == CONTENT_DIR:
return self._handle_content(path_tokens)
return self._route_configuration().handle_path(path_tokens)
def _metadata_as_json(self, version, path):
metadata = {'uuid': self.uuid}
if self.launch_metadata:
metadata['meta'] = self.launch_metadata
if self.files:
metadata['files'] = self.files
if self.extra_md:
metadata.update(self.extra_md)
if self.network_config:
metadata['network_config'] = self.network_config
if self.instance.key_name:
metadata['public_keys'] = {
self.instance.key_name: self.instance.key_data
}
keypair = keypair_obj.KeyPair.get_by_name(
context.get_admin_context(), self.instance.user_id,
self.instance.key_name)
metadata['keys'] = [
{'name': keypair.name,
'type': keypair.type,
'data': keypair.public_key}
]
metadata['hostname'] = self._get_hostname()
metadata['name'] = self.instance.display_name
metadata['launch_index'] = self.instance.launch_index
metadata['availability_zone'] = self.availability_zone
if self._check_os_version(GRIZZLY, version):
metadata['random_seed'] = base64.b64encode(os.urandom(512))
if self._check_os_version(LIBERTY, version):
metadata['project_id'] = self.instance.project_id
self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
return jsonutils.dumps(metadata)
def _handle_content(self, path_tokens):
if len(path_tokens) == 1:
raise KeyError("no listing for %s" % "/".join(path_tokens))
if len(path_tokens) != 2:
raise KeyError("Too many tokens for /%s" % CONTENT_DIR)
return self.content[path_tokens[1]]
def _handle_version(self, version, path):
# request for /version, give a list of what is available
ret = [MD_JSON_NAME]
if self.userdata_raw is not None:
ret.append(UD_NAME)
if self._check_os_version(GRIZZLY, version):
ret.append(PASS_NAME)
if self._check_os_version(HAVANA, version):
ret.append(VD_JSON_NAME)
if self._check_os_version(LIBERTY, version):
ret.append(NW_JSON_NAME)
return ret
def _user_data(self, version, path):
if self.userdata_raw is None:
raise KeyError(path)
return self.userdata_raw
def _network_data(self, version, path):
if self.network_metadata is None:
return jsonutils.dumps({})
return jsonutils.dumps(self.network_metadata)
def _password(self, version, path):
if self._check_os_version(GRIZZLY, version):
return password.handle_password
raise KeyError(path)
def _vendor_data(self, version, path):
if self._check_os_version(HAVANA, version):
self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
return jsonutils.dumps(self.vddriver.get())
raise KeyError(path)
def _check_version(self, required, requested, versions=VERSIONS):
return versions.index(requested) >= versions.index(required)
def _check_os_version(self, required, requested):
return self._check_version(required, requested, OPENSTACK_VERSIONS)
def _get_hostname(self):
return "%s%s%s" % (self.instance.hostname,
'.' if CONF.dhcp_domain else '',
CONF.dhcp_domain)
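# Illustrative values (assumed): with instance.hostname = 'vm-1' and
# CONF.dhcp_domain = 'novalocal' this returns 'vm-1.novalocal'; with an
# empty dhcp_domain it returns just 'vm-1'.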
def lookup(self, path):
if path == "" or path[0] != "/":
path = posixpath.normpath("/" + path)
else:
path = posixpath.normpath(path)
# Set default mimeType. It will be modified only if there is a change
self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
# fix up requests, prepending /ec2 to anything that does not match
path_tokens = path.split('/')[1:]
if path_tokens[0] not in ("ec2", "openstack"):
if path_tokens[0] == "":
# request for /
path_tokens = ["ec2"]
else:
path_tokens = ["ec2"] + path_tokens
path = "/" + "/".join(path_tokens)
# all values of the 'path' input start with '/' and have no trailing /
# specifically handle the top level request
if len(path_tokens) == 1:
if path_tokens[0] == "openstack":
# NOTE(vish): don't show versions that are in the future
today = timeutils.utcnow().strftime("%Y-%m-%d")
versions = [v for v in OPENSTACK_VERSIONS if v <= today]
if OPENSTACK_VERSIONS != versions:
LOG.debug("future versions %s hidden in version list",
[v for v in OPENSTACK_VERSIONS
if v not in versions])
versions += ["latest"]
else:
versions = VERSIONS + ["latest"]
return versions
try:
if path_tokens[0] == "openstack":
data = self.get_openstack_item(path_tokens[1:])
else:
data = self.get_ec2_item(path_tokens[1:])
except (InvalidMetadataVersion, KeyError):
raise InvalidMetadataPath(path)
return data
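# Illustrative lookups (paths follow the handlers above; returned values
# depend on the instance and are only sketched here):
#   lookup('/openstack') -> list of supported versions plus 'latest'
#   lookup('/openstack/latest/meta_data.json') -> JSON string of metadata
#   lookup('/2009-04-04/meta-data/local-ipv4') -> the instance's fixed IP
# Unknown paths raise InvalidMetadataPath.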
def metadata_for_config_drive(self):
"""Yields (path, value) tuples for metadata elements."""
# EC2 style metadata
for version in VERSIONS + ["latest"]:
if version in CONF.config_drive_skip_versions.split(' '):
continue
data = self.get_ec2_metadata(version)
if 'user-data' in data:
filepath = os.path.join('ec2', version, 'user-data')
yield (filepath, data['user-data'])
del data['user-data']
try:
del data['public-keys']['0']['_name']
except KeyError:
pass
filepath = os.path.join('ec2', version, 'meta-data.json')
yield (filepath, jsonutils.dumps(data['meta-data']))
ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"]
for version in ALL_OPENSTACK_VERSIONS:
path = 'openstack/%s/%s' % (version, MD_JSON_NAME)
yield (path, self.lookup(path))
path = 'openstack/%s/%s' % (version, UD_NAME)
if self.userdata_raw is not None:
yield (path, self.lookup(path))
if self._check_version(HAVANA, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
yield (path, self.lookup(path))
for (cid, content) in six.iteritems(self.content):
if self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, NW_JSON_NAME)
yield (path, self.lookup(path))
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
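# Example of the (path, value) pairs this generator yields (paths follow
# the patterns above; the values are instance-specific and omitted here):
#   ('ec2/2009-04-04/meta-data.json', ...)
#   ('openstack/latest/meta_data.json', ...)
#   ('openstack/content/0000', ...)   # one entry per injected file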
class RouteConfiguration(object):
"""Routes metadata paths to request handlers."""
def __init__(self, path_handler):
self.path_handlers = path_handler
def _version(self, version):
if version == "latest":
version = OPENSTACK_VERSIONS[-1]
if version not in OPENSTACK_VERSIONS:
raise InvalidMetadataVersion(version)
return version
def handle_path(self, path_tokens):
version = self._version(path_tokens[0])
if len(path_tokens) == 1:
path = VERSION
else:
path = '/'.join(path_tokens[1:])
path_handler = self.path_handlers[path]
if path_handler is None:
raise KeyError(path)
return path_handler(version, path)
class VendorDataDriver(object):
"""The base VendorData Drivers should inherit from."""
def __init__(self, *args, **kwargs):
"""Init method should do all expensive operations."""
self._data = {}
def get(self):
"""Return a dictionary of primitives to be rendered in metadata
:return: A dictionary or primitives.
"""
return self._data
def get_metadata_by_address(address):
ctxt = context.get_admin_context()
fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)
return get_metadata_by_instance_id(fixed_ip['instance_uuid'],
address,
ctxt)
def get_metadata_by_instance_id(instance_id, address, ctxt=None):
ctxt = ctxt or context.get_admin_context()
instance = objects.Instance.get_by_uuid(
ctxt, instance_id, expected_attrs=['ec2_ids', 'flavor', 'info_cache'])
return InstanceMetadata(instance, address)
def _format_instance_mapping(ctxt, instance):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
return block_device.instance_block_mapping(instance, bdms)
def ec2_md_print(data):
if isinstance(data, dict):
output = ''
for key in sorted(data.keys()):
if key == '_name':
continue
if isinstance(data[key], dict):
if '_name' in data[key]:
output += str(data[key]['_name'])
else:
output += key + '/'
else:
output += key
output += '\n'
return output[:-1]
elif isinstance(data, list):
return '\n'.join(data)
else:
return str(data)
def find_path_in_tree(data, path_tokens):
# given a dict/list tree, and a path in that tree, return data found there.
for i in range(0, len(path_tokens)):
if isinstance(data, dict) or isinstance(data, list):
if path_tokens[i] in data:
data = data[path_tokens[i]]
else:
raise KeyError("/".join(path_tokens[0:i]))
else:
if i != len(path_tokens) - 1:
raise KeyError("/".join(path_tokens[0:i]))
data = data[path_tokens[i]]
return data
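# Small illustration (hypothetical data): with
#   data = {'meta-data': {'hostname': 'vm-1'}}
# find_path_in_tree(data, ['meta-data', 'hostname']) returns 'vm-1', while
# find_path_in_tree(data, ['meta-data', 'missing']) raises KeyError.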
| apache-2.0 |
gavinelliott/churchill | node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Studio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
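# Illustrative return values (the install path is hypothetical): for a
# non-Express 2013 install at C:\VS12 on a 64-bit host,
#   SetupScript('x64') -> ['C:\\VS12\\VC\\vcvarsall.bat', 'amd64']
#   SetupScript('x86') -> ['C:\\VS12\\VC\\vcvarsall.bat', 'amd64_x86']
# while an SDK-based setup returns [<sdk_dir>/Bin/SetEnv.Cmd, '/x64'] instead.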
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
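# Illustrative call (the returned path is hypothetical and will be None if
# the key or value does not exist):
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0',
#                     'InstallDir')
#   -> r'C:\Program Files (x86)\Microsoft Visual Studio 12.0\Common7\IDE\'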
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will raise an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-10 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only the SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
| mit |
AlexBoogaard/Sick-Beard-Torrent-Edition | lib/unidecode/x08b.py | 252 | 4643 | data = (
'Mou ', # 0x00
'Ye ', # 0x01
'Wei ', # 0x02
'[?] ', # 0x03
'Teng ', # 0x04
'Zou ', # 0x05
'Shan ', # 0x06
'Jian ', # 0x07
'Bo ', # 0x08
'Ku ', # 0x09
'Huang ', # 0x0a
'Huo ', # 0x0b
'Ge ', # 0x0c
'Ying ', # 0x0d
'Mi ', # 0x0e
'Xiao ', # 0x0f
'Mi ', # 0x10
'Xi ', # 0x11
'Qiang ', # 0x12
'Chen ', # 0x13
'Nue ', # 0x14
'Ti ', # 0x15
'Su ', # 0x16
'Bang ', # 0x17
'Chi ', # 0x18
'Qian ', # 0x19
'Shi ', # 0x1a
'Jiang ', # 0x1b
'Yuan ', # 0x1c
'Xie ', # 0x1d
'Xue ', # 0x1e
'Tao ', # 0x1f
'Yao ', # 0x20
'Yao ', # 0x21
'[?] ', # 0x22
'Yu ', # 0x23
'Biao ', # 0x24
'Cong ', # 0x25
'Qing ', # 0x26
'Li ', # 0x27
'Mo ', # 0x28
'Mo ', # 0x29
'Shang ', # 0x2a
'Zhe ', # 0x2b
'Miu ', # 0x2c
'Jian ', # 0x2d
'Ze ', # 0x2e
'Jie ', # 0x2f
'Lian ', # 0x30
'Lou ', # 0x31
'Can ', # 0x32
'Ou ', # 0x33
'Guan ', # 0x34
'Xi ', # 0x35
'Zhuo ', # 0x36
'Ao ', # 0x37
'Ao ', # 0x38
'Jin ', # 0x39
'Zhe ', # 0x3a
'Yi ', # 0x3b
'Hu ', # 0x3c
'Jiang ', # 0x3d
'Man ', # 0x3e
'Chao ', # 0x3f
'Han ', # 0x40
'Hua ', # 0x41
'Chan ', # 0x42
'Xu ', # 0x43
'Zeng ', # 0x44
'Se ', # 0x45
'Xi ', # 0x46
'She ', # 0x47
'Dui ', # 0x48
'Zheng ', # 0x49
'Nao ', # 0x4a
'Lan ', # 0x4b
'E ', # 0x4c
'Ying ', # 0x4d
'Jue ', # 0x4e
'Ji ', # 0x4f
'Zun ', # 0x50
'Jiao ', # 0x51
'Bo ', # 0x52
'Hui ', # 0x53
'Zhuan ', # 0x54
'Mu ', # 0x55
'Zen ', # 0x56
'Zha ', # 0x57
'Shi ', # 0x58
'Qiao ', # 0x59
'Tan ', # 0x5a
'Zen ', # 0x5b
'Pu ', # 0x5c
'Sheng ', # 0x5d
'Xuan ', # 0x5e
'Zao ', # 0x5f
'Tan ', # 0x60
'Dang ', # 0x61
'Sui ', # 0x62
'Qian ', # 0x63
'Ji ', # 0x64
'Jiao ', # 0x65
'Jing ', # 0x66
'Lian ', # 0x67
'Nou ', # 0x68
'Yi ', # 0x69
'Ai ', # 0x6a
'Zhan ', # 0x6b
'Pi ', # 0x6c
'Hui ', # 0x6d
'Hua ', # 0x6e
'Yi ', # 0x6f
'Yi ', # 0x70
'Shan ', # 0x71
'Rang ', # 0x72
'Nou ', # 0x73
'Qian ', # 0x74
'Zhui ', # 0x75
'Ta ', # 0x76
'Hu ', # 0x77
'Zhou ', # 0x78
'Hao ', # 0x79
'Ye ', # 0x7a
'Ying ', # 0x7b
'Jian ', # 0x7c
'Yu ', # 0x7d
'Jian ', # 0x7e
'Hui ', # 0x7f
'Du ', # 0x80
'Zhe ', # 0x81
'Xuan ', # 0x82
'Zan ', # 0x83
'Lei ', # 0x84
'Shen ', # 0x85
'Wei ', # 0x86
'Chan ', # 0x87
'Li ', # 0x88
'Yi ', # 0x89
'Bian ', # 0x8a
'Zhe ', # 0x8b
'Yan ', # 0x8c
'E ', # 0x8d
'Chou ', # 0x8e
'Wei ', # 0x8f
'Chou ', # 0x90
'Yao ', # 0x91
'Chan ', # 0x92
'Rang ', # 0x93
'Yin ', # 0x94
'Lan ', # 0x95
'Chen ', # 0x96
'Huo ', # 0x97
'Zhe ', # 0x98
'Huan ', # 0x99
'Zan ', # 0x9a
'Yi ', # 0x9b
'Dang ', # 0x9c
'Zhan ', # 0x9d
'Yan ', # 0x9e
'Du ', # 0x9f
'Yan ', # 0xa0
'Ji ', # 0xa1
'Ding ', # 0xa2
'Fu ', # 0xa3
'Ren ', # 0xa4
'Ji ', # 0xa5
'Jie ', # 0xa6
'Hong ', # 0xa7
'Tao ', # 0xa8
'Rang ', # 0xa9
'Shan ', # 0xaa
'Qi ', # 0xab
'Tuo ', # 0xac
'Xun ', # 0xad
'Yi ', # 0xae
'Xun ', # 0xaf
'Ji ', # 0xb0
'Ren ', # 0xb1
'Jiang ', # 0xb2
'Hui ', # 0xb3
'Ou ', # 0xb4
'Ju ', # 0xb5
'Ya ', # 0xb6
'Ne ', # 0xb7
'Xu ', # 0xb8
'E ', # 0xb9
'Lun ', # 0xba
'Xiong ', # 0xbb
'Song ', # 0xbc
'Feng ', # 0xbd
'She ', # 0xbe
'Fang ', # 0xbf
'Jue ', # 0xc0
'Zheng ', # 0xc1
'Gu ', # 0xc2
'He ', # 0xc3
'Ping ', # 0xc4
'Zu ', # 0xc5
'Shi ', # 0xc6
'Xiong ', # 0xc7
'Zha ', # 0xc8
'Su ', # 0xc9
'Zhen ', # 0xca
'Di ', # 0xcb
'Zou ', # 0xcc
'Ci ', # 0xcd
'Qu ', # 0xce
'Zhao ', # 0xcf
'Bi ', # 0xd0
'Yi ', # 0xd1
'Yi ', # 0xd2
'Kuang ', # 0xd3
'Lei ', # 0xd4
'Shi ', # 0xd5
'Gua ', # 0xd6
'Shi ', # 0xd7
'Jie ', # 0xd8
'Hui ', # 0xd9
'Cheng ', # 0xda
'Zhu ', # 0xdb
'Shen ', # 0xdc
'Hua ', # 0xdd
'Dan ', # 0xde
'Gou ', # 0xdf
'Quan ', # 0xe0
'Gui ', # 0xe1
'Xun ', # 0xe2
'Yi ', # 0xe3
'Zheng ', # 0xe4
'Gai ', # 0xe5
'Xiang ', # 0xe6
'Cha ', # 0xe7
'Hun ', # 0xe8
'Xu ', # 0xe9
'Zhou ', # 0xea
'Jie ', # 0xeb
'Wu ', # 0xec
'Yu ', # 0xed
'Qiao ', # 0xee
'Wu ', # 0xef
'Gao ', # 0xf0
'You ', # 0xf1
'Hui ', # 0xf2
'Kuang ', # 0xf3
'Shuo ', # 0xf4
'Song ', # 0xf5
'Ai ', # 0xf6
'Qing ', # 0xf7
'Zhu ', # 0xf8
'Zou ', # 0xf9
'Nuo ', # 0xfa
'Du ', # 0xfb
'Zhuo ', # 0xfc
'Fei ', # 0xfd
'Ke ', # 0xfe
'Wei ', # 0xff
)
| gpl-3.0 |
Maratyszcza/ninja-pypi | misc/ninja_syntax_test.py | 11 | 6604 | #!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import ninja_syntax
LONGWORD = 'a' * 10
LONGWORDWITHSPACES = 'a'*5 + '$ ' + 'a'*5
INDENT = ' '
class TestLineWordWrap(unittest.TestCase):
def setUp(self):
self.out = StringIO()
self.n = ninja_syntax.Writer(self.out, width=8)
def test_single_long_word(self):
# We shouldn't wrap a single long word.
self.n._line(LONGWORD)
self.assertEqual(LONGWORD + '\n', self.out.getvalue())
def test_few_long_words(self):
# We should wrap a line where the second word is overlong.
self.n._line(' '.join(['x', LONGWORD, 'y']))
self.assertEqual(' $\n'.join(['x',
INDENT + LONGWORD,
INDENT + 'y']) + '\n',
self.out.getvalue())
def test_comment_wrap(self):
# Filenames should not be wrapped
self.n.comment('Hello /usr/local/build-tools/bin')
self.assertEqual('# Hello\n# /usr/local/build-tools/bin\n',
self.out.getvalue())
def test_short_words_indented(self):
# Test that indent is taken into account when breaking subsequent lines.
# The second line should not be ' to tree', as that's longer than the
# test layout width of 8.
self.n._line('line_one to tree')
self.assertEqual('''\
line_one $
to $
tree
''',
self.out.getvalue())
def test_few_long_words_indented(self):
# Check wrapping in the presence of indenting.
self.n._line(' '.join(['x', LONGWORD, 'y']), indent=1)
self.assertEqual(' $\n'.join([' ' + 'x',
' ' + INDENT + LONGWORD,
' ' + INDENT + 'y']) + '\n',
self.out.getvalue())
def test_escaped_spaces(self):
self.n._line(' '.join(['x', LONGWORDWITHSPACES, 'y']))
self.assertEqual(' $\n'.join(['x',
INDENT + LONGWORDWITHSPACES,
INDENT + 'y']) + '\n',
self.out.getvalue())
def test_fit_many_words(self):
self.n = ninja_syntax.Writer(self.out, width=78)
self.n._line('command = cd ../../chrome; python ../tools/grit/grit/format/repack.py ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak ../out/Debug/gen/chrome/theme_resources_large.pak', 1)
self.assertEqual('''\
command = cd ../../chrome; python ../tools/grit/grit/format/repack.py $
../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak $
../out/Debug/gen/chrome/theme_resources_large.pak
''',
self.out.getvalue())
def test_leading_space(self):
self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
self.n.variable('foo', ['', '-bar', '-somethinglong'], 0)
self.assertEqual('''\
foo = -bar $
-somethinglong
''',
self.out.getvalue())
def test_embedded_dollar_dollar(self):
self.n = ninja_syntax.Writer(self.out, width=15) # force wrapping
self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
self.assertEqual('''\
foo = a$$b $
-somethinglong
''',
self.out.getvalue())
def test_two_embedded_dollar_dollars(self):
self.n = ninja_syntax.Writer(self.out, width=17) # force wrapping
self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
self.assertEqual('''\
foo = a$$b $
-somethinglong
''',
self.out.getvalue())
def test_leading_dollar_dollar(self):
self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
self.n.variable('foo', ['$$b', '-somethinglong'], 0)
self.assertEqual('''\
foo = $$b $
-somethinglong
''',
self.out.getvalue())
def test_trailing_dollar_dollar(self):
self.n = ninja_syntax.Writer(self.out, width=14) # force wrapping
self.n.variable('foo', ['a$$', '-somethinglong'], 0)
self.assertEqual('''\
foo = a$$ $
-somethinglong
''',
self.out.getvalue())
class TestBuild(unittest.TestCase):
def setUp(self):
self.out = StringIO()
self.n = ninja_syntax.Writer(self.out)
def test_variables_dict(self):
self.n.build('out', 'cc', 'in', variables={'name': 'value'})
self.assertEqual('''\
build out: cc in
name = value
''',
self.out.getvalue())
def test_variables_list(self):
self.n.build('out', 'cc', 'in', variables=[('name', 'value')])
self.assertEqual('''\
build out: cc in
name = value
''',
self.out.getvalue())
def test_implicit_outputs(self):
self.n.build('o', 'cc', 'i', implicit_outputs='io')
self.assertEqual('''\
build o | io: cc i
''',
self.out.getvalue())
class TestExpand(unittest.TestCase):
def test_basic(self):
vars = {'x': 'X'}
self.assertEqual('foo', ninja_syntax.expand('foo', vars))
def test_var(self):
vars = {'xyz': 'XYZ'}
self.assertEqual('fooXYZ', ninja_syntax.expand('foo$xyz', vars))
def test_vars(self):
vars = {'x': 'X', 'y': 'YYY'}
self.assertEqual('XYYY', ninja_syntax.expand('$x$y', vars))
def test_space(self):
vars = {}
self.assertEqual('x y z', ninja_syntax.expand('x$ y$ z', vars))
def test_locals(self):
vars = {'x': 'a'}
local_vars = {'x': 'b'}
self.assertEqual('a', ninja_syntax.expand('$x', vars))
self.assertEqual('b', ninja_syntax.expand('$x', vars, local_vars))
def test_double(self):
self.assertEqual('a b$c', ninja_syntax.expand('a$ b$$c', {}))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
quamilek/django | django/views/decorators/csrf.py | 586 | 2202 | from functools import wraps
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import available_attrs, decorator_from_middleware
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests or log warnings.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
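# Illustrative usage of the decorators above (editor's sketch, not part of the
# original module; the view functions are hypothetical):
#
#   @csrf_protect
#   def my_form_view(request):
#       ...   # per-view CSRF protection, same checks as CsrfViewMiddleware
#
#   @ensure_csrf_cookie
#   def my_page_view(request):
#       ...   # response sets the CSRF cookie even without {% csrf_token %}
#
#   @csrf_exempt
#   def my_webhook(request):
#       ...   # skips CSRF checks entirely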
| bsd-3-clause |
jostep/tensorflow | tensorflow/contrib/cluster_resolver/python/training/gce_cluster_resolver.py | 41 | 5116 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for GCE Instance Groups."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
class GceClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Compute Engine.
This is an implementation of cluster resolvers for the Google Compute Engine
instance group platform. By specifying a project, zone, and instance group,
this will retrieve the IP address of all the instances within the instance
group and return a Cluster Resolver object suitable for use for distributed
TensorFlow.
"""
def __init__(self,
project,
zone,
instance_group,
port,
job_name='worker',
credentials='default',
service=None):
"""Creates a new GceClusterResolver object.
    This takes in a few parameters and creates a GceClusterResolver object. It
will then use these parameters to query the GCE API for the IP addresses of
each instance in the instance group.
Args:
project: Name of the GCE project
zone: Zone of the GCE instance group
instance_group: Name of the GCE instance group
port: Port of the listening TensorFlow server (default: 8470)
job_name: Name of the TensorFlow job this set of instances belongs to
credentials: GCE Credentials. If nothing is specified, this defaults to
GoogleCredentials.get_application_default()
service: The GCE API object returned by the googleapiclient.discovery
function. (Default: discovery.build('compute', 'v1')). If you specify a
custom service object, then the credentials parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._instance_group = instance_group
self._job_name = job_name
self._port = port
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'GCE cluster resolver')
self._service = discovery.build(
'compute', 'v1',
credentials=self._credentials)
else:
self._service = service
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified instance group. We will retrieve the information from the GCE APIs
every time this method is called.
Returns:
A ClusterSpec containing host information retrieved from GCE.
"""
request_body = {'instanceState': 'RUNNING'}
request = self._service.instanceGroups().listInstances(
project=self._project,
zone=self._zone,
instanceGroups=self._instance_group,
body=request_body,
orderBy='name')
worker_list = []
while request is not None:
response = request.execute()
items = response['items']
for instance in items:
instance_name = instance['instance'].split('/')[-1]
instance_request = self._service.instances().get(
project=self._project,
zone=self._zone,
instance=instance_name)
if instance_request is not None:
instance_details = instance_request.execute()
ip_address = instance_details['networkInterfaces'][0]['networkIP']
instance_url = '%s:%s' % (ip_address, self._port)
worker_list.append(instance_url)
request = self._service.instanceGroups().listInstances_next(
previous_request=request,
previous_response=response)
worker_list.sort()
return ClusterSpec({self._job_name: worker_list})
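# Illustrative usage (editor's sketch, not part of the original file; the
# project, zone and instance group names are hypothetical):
#
#   resolver = GceClusterResolver(project='my-project',
#                                 zone='us-central1-a',
#                                 instance_group='tf-workers',
#                                 port=8470)
#   cluster_spec = resolver.cluster_spec()  # queries the GCE API on each call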
| apache-2.0 |
lindzey/pelican-plugins | asciidoc_reader/asciidoc_reader.py | 24 | 1918 | # -*- coding: utf-8 -*-
"""
AsciiDoc Reader
===============
This plugin allows you to use AsciiDoc to write your posts.
File extension should be ``.asc``, ``.adoc``, or ``.asciidoc``.
"""
from pelican.readers import BaseReader
from pelican.utils import pelican_open
from pelican import signals
try:
# asciidocapi won't import on Py3
from .asciidocapi import AsciiDocAPI, AsciiDocError
# AsciiDocAPI class checks for asciidoc.py
AsciiDocAPI()
except:
asciidoc_enabled = False
else:
asciidoc_enabled = True
class AsciiDocReader(BaseReader):
"""Reader for AsciiDoc files"""
enabled = asciidoc_enabled
file_extensions = ['asc', 'adoc', 'asciidoc']
default_options = ["--no-header-footer", "-a newline=\\n"]
default_backend = 'html5'
def read(self, source_path):
"""Parse content and metadata of asciidoc files"""
from cStringIO import StringIO
with pelican_open(source_path) as source:
text = StringIO(source.encode('utf8'))
content = StringIO()
ad = AsciiDocAPI()
options = self.settings.get('ASCIIDOC_OPTIONS', [])
options = self.default_options + options
for o in options:
ad.options(*o.split())
backend = self.settings.get('ASCIIDOC_BACKEND', self.default_backend)
ad.execute(text, content, backend=backend)
content = content.getvalue().decode('utf8')
metadata = {}
for name, value in ad.asciidoc.document.attributes.items():
name = name.lower()
metadata[name] = self.process_metadata(name, value)
if 'doctitle' in metadata:
metadata['title'] = metadata['doctitle']
return content, metadata
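# Illustrative Pelican settings consumed above (editor's sketch, not part of
# the original plugin; the option values are hypothetical):
#
#   ASCIIDOC_OPTIONS = ['-a icons']   # appended to default_options
#   ASCIIDOC_BACKEND = 'html5'        # falls back to default_backend if unset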
def add_reader(readers):
for ext in AsciiDocReader.file_extensions:
readers.reader_classes[ext] = AsciiDocReader
def register():
signals.readers_init.connect(add_reader)
| agpl-3.0 |
SecsAndCyber/wincrypt.py | WinCrypto/crypt32.py | 1 | 3284 | from WinCrypto.crypt_structs import *
from ctypes import *
from struct import unpack, pack
import binascii
"""
WINCRYPT32API
BOOL
WINAPI
CryptQueryObject(
_In_ DWORD dwObjectType,
_In_ const void *pvObject,
_In_ DWORD dwExpectedContentTypeFlags,
_In_ DWORD dwExpectedFormatTypeFlags,
_In_ DWORD dwFlags,
_Out_opt_ DWORD *pdwMsgAndCertEncodingType,
_Out_opt_ DWORD *pdwContentType,
_Out_opt_ DWORD *pdwFormatType,
_Out_opt_ HCERTSTORE *phCertStore,
_Out_opt_ HCRYPTMSG *phMsg,
_Outptr_opt_result_maybenull_ const void **ppvContext
);
"""
CryptQueryObject = crypt32_dll.CryptQueryObject
# Note: ctypes uses .restype (not .res_type) for a foreign function's return type
CryptQueryObject.restype = c_bool
CryptQueryObject.argtypes = [DWORD, PVOID, DWORD, DWORD, DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), POINTER(HCERTSTORE), POINTER(HCRYPTMSG), POINTER(POINTER(PVOID)) ]
"""
WINCRYPT32API
BOOL
WINAPI
CryptMsgGetParam(
_In_ HCRYPTMSG hCryptMsg,
_In_ DWORD dwParamType,
_In_ DWORD dwIndex,
_Out_writes_bytes_to_opt_(*pcbData, *pcbData) void *pvData,
_Inout_ DWORD *pcbData
);
"""
CryptMsgGetParam = crypt32_dll.CryptMsgGetParam
CryptMsgGetParam.restype = c_bool
CryptMsgGetParam.argtypes = [ HCRYPTMSG, DWORD, DWORD, POINTER(BYTE), POINTER(DWORD) ]
"""
WINCRYPT32API
BOOL
WINAPI
CryptDecodeObject(
_In_ DWORD dwCertEncodingType,
_In_ LPCSTR lpszStructType,
_In_reads_bytes_(cbEncoded) const BYTE *pbEncoded,
_In_ DWORD cbEncoded,
_In_ DWORD dwFlags,
_Out_writes_bytes_to_opt_(*pcbStructInfo, *pcbStructInfo) void *pvStructInfo,
_Inout_ DWORD *pcbStructInfo
);
"""
CryptDecodeObject = crypt32_dll.CryptDecodeObject
CryptDecodeObject.restype = c_bool
CryptDecodeObject.argtypes = [ DWORD, LPCSTR, PVOID, DWORD, DWORD, POINTER(BYTE), POINTER(DWORD) ]
"""
WINCRYPT32API
PCCERT_CONTEXT
WINAPI
CertFindCertificateInStore(
_In_ HCERTSTORE hCertStore,
_In_ DWORD dwCertEncodingType,
_In_ DWORD dwFindFlags,
_In_ DWORD dwFindType,
_In_opt_ const void *pvFindPara,
_In_opt_ PCCERT_CONTEXT pPrevCertContext
);
"""
CertFindCertificateInStore = crypt32_dll.CertFindCertificateInStore
CertFindCertificateInStore.restype = POINTER(CERT_CONTEXT)
CertFindCertificateInStore.argtypes = [ HCERTSTORE, DWORD, DWORD, DWORD, PVOID, POINTER(CERT_CONTEXT) ]
"""
WINCRYPT32API
DWORD
WINAPI
CertNameToStrW(
_In_ DWORD dwCertEncodingType,
_In_ PCERT_NAME_BLOB pName,
_In_ DWORD dwStrType,
_Out_writes_to_opt_(csz, return) LPWSTR psz,
_In_ DWORD csz
);
"""
CertNameToStr = crypt32_dll.CertNameToStrW
CertNameToStr.restype = DWORD
CertNameToStr.argtypes = [ DWORD, PCERT_NAME_BLOB, DWORD, LPWSTR, DWORD ]
"""
WINCRYPT32API
DWORD
WINAPI
CertGetNameStringW(
_In_ PCCERT_CONTEXT pCertContext,
_In_ DWORD dwType,
_In_ DWORD dwFlags,
_In_opt_ void *pvTypePara,
_Out_writes_to_opt_(cchNameString, return) LPWSTR pszNameString,
_In_ DWORD cchNameString
);
"""
CertGetNameString = crypt32_dll.CertGetNameStringW
CertGetNameString.restype = DWORD
CertGetNameString.argtypes = [ POINTER(CERT_CONTEXT), DWORD, DWORD, PVOID, LPWSTR, DWORD ]
| mit |
rmrice/cool-as-a-cucumber | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 1534 | 3426 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
| mit |
rwillmer/django | tests/template_tests/syntax_tests/test_comment.py | 521 | 3667 | from django.test import SimpleTestCase
from ..utils import setup
class CommentSyntaxTests(SimpleTestCase):
@setup({'comment-syntax01': '{# this is hidden #}hello'})
def test_comment_syntax01(self):
output = self.engine.render_to_string('comment-syntax01')
self.assertEqual(output, 'hello')
@setup({'comment-syntax02': '{# this is hidden #}hello{# foo #}'})
def test_comment_syntax02(self):
output = self.engine.render_to_string('comment-syntax02')
self.assertEqual(output, 'hello')
@setup({'comment-syntax03': 'foo{# {% if %} #}'})
def test_comment_syntax03(self):
output = self.engine.render_to_string('comment-syntax03')
self.assertEqual(output, 'foo')
@setup({'comment-syntax04': 'foo{# {% endblock %} #}'})
def test_comment_syntax04(self):
output = self.engine.render_to_string('comment-syntax04')
self.assertEqual(output, 'foo')
@setup({'comment-syntax05': 'foo{# {% somerandomtag %} #}'})
def test_comment_syntax05(self):
output = self.engine.render_to_string('comment-syntax05')
self.assertEqual(output, 'foo')
@setup({'comment-syntax06': 'foo{# {% #}'})
def test_comment_syntax06(self):
output = self.engine.render_to_string('comment-syntax06')
self.assertEqual(output, 'foo')
@setup({'comment-syntax07': 'foo{# %} #}'})
def test_comment_syntax07(self):
output = self.engine.render_to_string('comment-syntax07')
self.assertEqual(output, 'foo')
@setup({'comment-syntax08': 'foo{# %} #}bar'})
def test_comment_syntax08(self):
output = self.engine.render_to_string('comment-syntax08')
self.assertEqual(output, 'foobar')
@setup({'comment-syntax09': 'foo{# {{ #}'})
def test_comment_syntax09(self):
output = self.engine.render_to_string('comment-syntax09')
self.assertEqual(output, 'foo')
@setup({'comment-syntax10': 'foo{# }} #}'})
def test_comment_syntax10(self):
output = self.engine.render_to_string('comment-syntax10')
self.assertEqual(output, 'foo')
@setup({'comment-syntax11': 'foo{# { #}'})
def test_comment_syntax11(self):
output = self.engine.render_to_string('comment-syntax11')
self.assertEqual(output, 'foo')
@setup({'comment-syntax12': 'foo{# } #}'})
def test_comment_syntax12(self):
output = self.engine.render_to_string('comment-syntax12')
self.assertEqual(output, 'foo')
@setup({'comment-tag01': '{% comment %}this is hidden{% endcomment %}hello'})
def test_comment_tag01(self):
output = self.engine.render_to_string('comment-tag01')
self.assertEqual(output, 'hello')
@setup({'comment-tag02': '{% comment %}this is hidden{% endcomment %}'
'hello{% comment %}foo{% endcomment %}'})
def test_comment_tag02(self):
output = self.engine.render_to_string('comment-tag02')
self.assertEqual(output, 'hello')
@setup({'comment-tag03': 'foo{% comment %} {% if %} {% endcomment %}'})
def test_comment_tag03(self):
output = self.engine.render_to_string('comment-tag03')
self.assertEqual(output, 'foo')
@setup({'comment-tag04': 'foo{% comment %} {% endblock %} {% endcomment %}'})
def test_comment_tag04(self):
output = self.engine.render_to_string('comment-tag04')
self.assertEqual(output, 'foo')
@setup({'comment-tag05': 'foo{% comment %} {% somerandomtag %} {% endcomment %}'})
def test_comment_tag05(self):
output = self.engine.render_to_string('comment-tag05')
self.assertEqual(output, 'foo')
| bsd-3-clause |
Learningtribes/edx-platform | pavelib/paver_tests/test_safecommit.py | 27 | 1391 | """
Tests for paver safecommit quality tasks
"""
from mock import patch
import pavelib.quality
from paver.easy import call_task
from .utils import PaverTestCase
class PaverSafeCommitTest(PaverTestCase):
"""
Test run_safecommit_report with a mocked environment in order to pass in
opts.
"""
def setUp(self):
super(PaverSafeCommitTest, self).setUp()
self.reset_task_messages()
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safecommit_count')
def test_safecommit_violation_number_not_found(self, _mock_count, _mock_report_dir, _mock_write_metric):
"""
run_safecommit_report encounters an error parsing the safecommit output
log.
"""
_mock_count.return_value = None
with self.assertRaises(SystemExit):
call_task('pavelib.quality.run_safecommit_report')
@patch.object(pavelib.quality, '_write_metric')
@patch.object(pavelib.quality, '_prepare_report_dir')
@patch.object(pavelib.quality, '_get_safecommit_count')
def test_safecommit_vanilla(self, _mock_count, _mock_report_dir, _mock_write_metric):
"""
run_safecommit_report finds violations.
"""
_mock_count.return_value = 0
call_task('pavelib.quality.run_safecommit_report')
| agpl-3.0 |
newswangerd/ansible | lib/ansible/plugins/inventory/__init__.py | 9 | 20585 | # (c) 2017, Red Hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import hashlib
import os
import string
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.inventory.group import to_safe_group_name as original_safe
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import AnsiblePlugin
from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, load_extra_vars
display = Display()
# Helper methods
def to_safe_group_name(name):
# placeholder for backwards compat
return original_safe(name, force=True, silent=True)
def detect_range(line=None):
'''
A helper function that checks a given host line to see if it contains
a range pattern described in the docstring above.
Returns True if the given line contains a pattern, else False.
'''
return '[' in line
def expand_hostname_range(line=None):
'''
A helper function that expands a given line that contains a pattern
specified in top docstring, and returns a list that consists of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
'''
all_hosts = []
if line:
        # A hostname such as db[1:6]-node is considered to consist of
        # three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
# Add support for multiple ranges in a host so:
# db[01:10:3]node-[01:10]
# - to do this we split off at the first [...] set, getting the list
# of hosts and then repeat until none left.
# - also add an optional third parameter which contains the step. (Default: 1)
# so range can be [01:10:2] -> 01 03 05 07 09
(head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
bounds = nrange.split(":")
if len(bounds) != 2 and len(bounds) != 3:
raise AnsibleError("host range must be begin:end or begin:end:step")
beg = bounds[0]
end = bounds[1]
if len(bounds) == 2:
step = 1
else:
step = bounds[2]
if not beg:
beg = "0"
if not end:
raise AnsibleError("host range must specify end value")
if beg[0] == '0' and len(beg) > 1:
rlen = len(beg) # range length formatting hint
if rlen != len(end):
raise AnsibleError("host range must specify equal-length begin and end formats")
def fill(x):
return str(x).zfill(rlen) # range sequence
else:
fill = str
try:
i_beg = string.ascii_letters.index(beg)
i_end = string.ascii_letters.index(end)
if i_beg > i_end:
raise AnsibleError("host range must have begin <= end")
seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
except ValueError: # not an alpha range
seq = range(int(beg), int(end) + 1, int(step))
for rseq in seq:
hname = ''.join((head, fill(rseq), tail))
if detect_range(hname):
all_hosts.extend(expand_hostname_range(hname))
else:
all_hosts.append(hname)
return all_hosts
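# Editor's illustrative sketch (not part of the original module): a quick
# check of how expand_hostname_range() expands numeric and alphabetic ranges.
# The sample patterns below are hypothetical.
def _example_expand_hostname_range():
    # zero-padded numeric range with head and tail parts
    assert expand_hostname_range('db[01:03]-node') == ['db01-node', 'db02-node', 'db03-node']
    # alphabetic range
    assert expand_hostname_range('web-[a:c]') == ['web-a', 'web-b', 'web-c']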
def get_cache_plugin(plugin_name, **kwargs):
try:
cache = CacheObject(plugin_name, **kwargs)
except AnsibleError as e:
if 'fact_caching_connection' in to_native(e):
            raise AnsibleError("error, '%s' inventory cache plugin requires one of the following to be set "
"to a writeable directory path:\nansible.cfg:\n[default]: fact_caching_connection,\n"
"[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
"ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
else:
raise e
if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
'from ansible.constants.'.format(plugin_name))
return cache
class BaseInventoryPlugin(AnsiblePlugin):
""" Parses an Inventory Source"""
TYPE = 'generator'
_sanitize_group_name = staticmethod(to_safe_group_name)
def __init__(self):
super(BaseInventoryPlugin, self).__init__()
self._options = {}
self.inventory = None
self.display = display
self._vars = {}
def parse(self, inventory, loader, path, cache=True):
''' Populates inventory from the given data. Raises an error on any parse failure
:arg inventory: a copy of the previously accumulated inventory data,
to be updated with any new data this plugin provides.
The inventory can be empty if no other source/plugin ran successfully.
:arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
it also has Vault support to automatically decrypt files.
:arg path: the string that represents the 'inventory source',
normally a path to a configuration file for this inventory,
but it can also be a raw string for this plugin to consume
:arg cache: a boolean that indicates if the plugin should use the cache or not
you can ignore if this plugin does not implement caching.
'''
self.loader = loader
self.inventory = inventory
self.templar = Templar(loader=loader)
self._vars = load_extra_vars(loader)
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check
:arg path: a string that was passed as an inventory source,
it normally is a path to a config file, but this is not a requirement,
it can also be parsed itself as the inventory data to process.
So only call this base class if you expect it to be a file.
'''
valid = False
b_path = to_bytes(path, errors='surrogate_or_strict')
if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
valid = True
else:
self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
return valid
def _populate_host_vars(self, hosts, variables, group=None, port=None):
if not isinstance(variables, Mapping):
raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _read_config_data(self, path):
''' validate config and set options as appropriate
:arg path: path to common yaml format config file for this plugin
'''
config = {}
try:
# avoid loader cache so meta: refresh_inventory can pick up config changes
# if we read more than once, fs cache should be good enough
config = self.loader.load_from_file(path, cache=False)
except Exception as e:
raise AnsibleParserError(to_native(e))
# a plugin can be loaded via many different names with redirection- if so, we want to accept any of those names
valid_names = getattr(self, '_redirected_names') or [self.NAME]
if not config:
# no data
raise AnsibleParserError("%s is empty" % (to_native(path)))
elif config.get('plugin') not in valid_names:
# this is not my config file
raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
elif not isinstance(config, Mapping):
# configs are dictionaries
raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
self.set_options(direct=config, var_options=self._vars)
if 'cache' in self._options and self.get_option('cache'):
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
return config
def _consume_options(self, data):
''' update existing options from alternate configuration sources not normally used by Ansible.
        Many API libraries already have existing configuration sources; this allows a plugin author to leverage them.
:arg data: key/value pairs that correspond to configuration options for this plugin
'''
for k in self._options:
if k in data:
self._options[k] = data.pop(k)
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except Exception:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
class BaseFileInventoryPlugin(BaseInventoryPlugin):
""" Parses a File based Inventory Source"""
TYPE = 'storage'
def __init__(self):
super(BaseFileInventoryPlugin, self).__init__()
class DeprecatedCache(object):
def __init__(self, real_cacheable):
self.real_cacheable = real_cacheable
def get(self, key):
display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
'When expecting a KeyError, use self._cache[key] instead of using self.cache.get(key). '
'self._cache is a dictionary and will return a default value instead of raising a KeyError '
'when the key does not exist', version='2.12', collection_name='ansible.builtin')
return self.real_cacheable._cache[key]
def set(self, key, value):
display.deprecated('InventoryModule should utilize self._cache as a dict instead of self.cache. '
'To set the self._cache dictionary, use self._cache[key] = value instead of self.cache.set(key, value). '
'To force update the underlying cache plugin with the contents of self._cache before parse() is complete, '
'call self.set_cache_plugin and it will use the self._cache dictionary to update the cache plugin',
version='2.12', collection_name='ansible.builtin')
self.real_cacheable._cache[key] = value
self.real_cacheable.set_cache_plugin()
def __getattr__(self, name):
display.deprecated('InventoryModule should utilize self._cache instead of self.cache',
version='2.12', collection_name='ansible.builtin')
return self.real_cacheable._cache.__getattribute__(name)
class Cacheable(object):
_cache = CacheObject()
@property
def cache(self):
return DeprecatedCache(self)
def load_cache_plugin(self):
plugin_name = self.get_option('cache_plugin')
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(plugin_name, **cache_options)
def get_cache_key(self, path):
return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
def _get_cache_prefix(self, path):
''' create predictable unique prefix for plugin/inventory '''
m = hashlib.sha1()
m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
d1 = m.hexdigest()
n = hashlib.sha1()
n.update(to_bytes(path, errors='surrogate_or_strict'))
d2 = n.hexdigest()
return 's_'.join([d1[:5], d2[:5]])
def clear_cache(self):
self._cache.flush()
def update_cache_if_changed(self):
self._cache.update_cache_if_changed()
def set_cache_plugin(self):
self._cache.set_cache()
class Constructable(object):
def _compose(self, template, variables):
''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars'''
t = self.templar
try:
use_extra = self.get_option('use_extra_vars')
except Exception:
use_extra = False
if use_extra:
t.available_variables = combine_vars(variables, self._vars)
else:
t.available_variables = variables
return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)
def _set_composite_vars(self, compose, variables, host, strict=False):
''' loops over compose entries to create vars for hosts '''
if compose and isinstance(compose, dict):
for varname in compose:
try:
composite = self._compose(compose[varname], variables)
except Exception as e:
if strict:
raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
continue
self.inventory.set_variable(host, varname, composite)
def _add_host_to_composed_groups(self, groups, variables, host, strict=False):
        ''' helper to create complex groups for plugins based on jinja2 conditionals; hosts that meet the conditional are added to the group'''
# process each 'group entry'
if groups and isinstance(groups, dict):
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
group_name = original_safe(group_name, force=True)
try:
result = boolean(self.templar.template(conditional))
except Exception as e:
if strict:
raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
continue
if result:
# ensure group exists, use sanitized name
group_name = self.inventory.add_group(group_name)
# add host to group
self.inventory.add_child(group_name, host)
def _add_host_to_keyed_groups(self, keys, variables, host, strict=False):
''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
try:
key = self._compose(keyed.get('key'), variables)
except Exception as e:
if strict:
raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
continue
if key:
prefix = keyed.get('prefix', '')
sep = keyed.get('separator', '_')
raw_parent_name = keyed.get('parent_group', None)
if raw_parent_name:
try:
raw_parent_name = self.templar.template(raw_parent_name)
except AnsibleError as e:
if strict:
raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
continue
new_raw_group_names = []
if isinstance(key, string_types):
new_raw_group_names.append(key)
elif isinstance(key, list):
for name in key:
new_raw_group_names.append(name)
elif isinstance(key, Mapping):
for (gname, gval) in key.items():
name = '%s%s%s' % (gname, sep, gval)
new_raw_group_names.append(name)
else:
raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
for bare_name in new_raw_group_names:
if prefix == '' and self.get_option('leading_separator') is False:
sep = ''
gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
result_gname = self.inventory.add_group(gname)
self.inventory.add_host(host, result_gname)
if raw_parent_name:
parent_name = self._sanitize_group_name(raw_parent_name)
self.inventory.add_group(parent_name)
self.inventory.add_child(parent_name, result_gname)
else:
# exclude case of empty list and dictionary, because these are valid constructions
# simply no groups need to be constructed, but are still falsy
if strict and key not in ([], {}):
raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
else:
raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
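# Illustrative keyed_groups entry consumed by the helper above (editor's
# sketch, not part of the original file; the variable name and prefix are
# hypothetical):
#
#   keyed_groups:
#     - key: ec2_region     # group hosts by the value of this host variable
#       prefix: region      # e.g. hosts with ec2_region=us_east_1 land in group region_us_east_1
#       separator: "_"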
| gpl-3.0 |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/initshutdown.py | 1 | 2913 | # encoding: utf-8
# module samba.dcerpc.initshutdown
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/initshutdown.so
# by generator 1.135
""" initshutdown DCE/RPC """
# imports
import dcerpc as __dcerpc
# Variables with simple values
SHTDN_REASON_FLAG_PLANNED = 2147483648
SHTDN_REASON_FLAG_USER_DEFINED = 1073741824
SHTDN_REASON_MAJOR_APPLICATION = 262144
SHTDN_REASON_MAJOR_HARDWARE = 65536
SHTDN_REASON_MAJOR_LEGACY_API = 458752
SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 131072
SHTDN_REASON_MAJOR_OTHER = 0
SHTDN_REASON_MAJOR_POWER = 393216
SHTDN_REASON_MAJOR_SOFTWARE = 196608
SHTDN_REASON_MAJOR_SYSTEM = 327680
SHTDN_REASON_MINOR_BLUESCREEN = 15
SHTDN_REASON_MINOR_CORDUNPLUGGED = 11
SHTDN_REASON_MINOR_DISK = 7
SHTDN_REASON_MINOR_ENVIRONMENT = 12
SHTDN_REASON_MINOR_HARDWARE_DRIVER = 13
SHTDN_REASON_MINOR_HOTFIX = 17
SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 23
SHTDN_REASON_MINOR_HUNG = 5
SHTDN_REASON_MINOR_INSTALLATION = 2
SHTDN_REASON_MINOR_MAINTENANCE = 1
SHTDN_REASON_MINOR_MMC = 25
SHTDN_REASON_MINOR_NETWORKCARD = 9
SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 20
SHTDN_REASON_MINOR_OTHER = 0
SHTDN_REASON_MINOR_OTHERDRIVER = 14
SHTDN_REASON_MINOR_POWER_SUPPLY = 10
SHTDN_REASON_MINOR_PROCESSOR = 8
SHTDN_REASON_MINOR_RECONFIG = 4
SHTDN_REASON_MINOR_SECURITY = 19
SHTDN_REASON_MINOR_SECURITYFIX = 18
SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 24
SHTDN_REASON_MINOR_SERVICEPACK = 16
SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 22
SHTDN_REASON_MINOR_TERMSRV = 32
SHTDN_REASON_MINOR_UNSTABLE = 6
SHTDN_REASON_MINOR_UPGRADE = 3
SHTDN_REASON_MINOR_WMI = 21
# no functions
# classes
class initshutdown(__dcerpc.ClientConnection):
"""
initshutdown(binding, lp_ctx=None, credentials=None) -> connection
binding should be a DCE/RPC binding string (for example: ncacn_ip_tcp:127.0.0.1)
lp_ctx should be a path to a smb.conf file or a param.LoadParm object
credentials should be a credentials.Credentials object.
Init shutdown service
"""
def Abort(self, server): # real signature unknown; restored from __doc__
""" S.Abort(server) -> None """
pass
def Init(self, hostname, message, timeout, force_apps, do_reboot): # real signature unknown; restored from __doc__
""" S.Init(hostname, message, timeout, force_apps, do_reboot) -> None """
pass
def InitEx(self, hostname, message, timeout, force_apps, do_reboot, reason): # real signature unknown; restored from __doc__
""" S.InitEx(hostname, message, timeout, force_apps, do_reboot, reason) -> None """
pass
def __init__(self, binding, lp_ctx=None, credentials=None): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
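# Illustrative usage (editor's sketch, not generated from the library; the
# binding string, smb.conf path and credentials object are hypothetical):
#
#   conn = initshutdown(r'ncacn_ip_tcp:127.0.0.1', lp_ctx='/etc/samba/smb.conf',
#                       credentials=creds)
#   conn.Init('myhost', 'going down for maintenance', 60, True, False)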
| gpl-2.0 |
MTG/gaia | src/bindings/pygaia/scripts/collections/validate_all_collections.py | 1 | 1300 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Gaia
#
# Gaia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from __future__ import print_function
import os, sys, subprocess, yaml
from os.path import *
from gaia2.mtgdb.environment import MTGDB_AUDIO
import gaia2
if __name__ == '__main__':
collections_file = join(gaia2.rootdir(), 'mtgdb', 'mtgdb_collections.yaml')
for db in yaml.load(open(collections_file).read()):
cmd = [ 'python', join(split(__file__)[0], 'validate_collection.py'), join(MTGDB_AUDIO, db['location']) ]
subprocess.call(cmd)
print('\n')
| agpl-3.0 |
MziRintu/kitsune | kitsune/sumo/tests/test_templates.py | 13 | 3924 | from django.conf import settings
from django.test.client import RequestFactory
from django.utils import translation
import jingo
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.sumo.tests import LocalizingClient, TestCase
from kitsune.sumo.urlresolvers import reverse
def setup():
jingo.load_helpers()
def test_breadcrumb():
"""Make sure breadcrumb links start with /."""
c = LocalizingClient()
response = c.get(reverse('search'))
doc = pq(response.content)
href = doc('.breadcrumbs a')[0]
eq_('/', href.attrib['href'][0])
class MockRequestTests(TestCase):
"""Base class for tests that need a mock request"""
def setUp(self):
super(MockRequestTests, self).setUp()
request = RequestFactory()
request.GET = {}
request.LANGUAGE_CODE = 'en-US'
self.request = request
class BaseTemplateTests(MockRequestTests):
"""Tests for base.html"""
def setUp(self):
super(BaseTemplateTests, self).setUp()
self.template = 'base.html'
def test_dir_ltr(self):
"""Make sure dir attr is set to 'ltr' for LTR language."""
html = jingo.render_to_string(self.request, self.template)
eq_('ltr', pq(html)('html').attr['dir'])
def test_dir_rtl(self):
"""Make sure dir attr is set to 'rtl' for RTL language."""
translation.activate('he')
self.request.LANGUAGE_CODE = 'he'
html = jingo.render_to_string(self.request, self.template)
eq_('rtl', pq(html)('html').attr['dir'])
translation.deactivate()
def test_multi_feeds(self):
"""Ensure that multiple feeds are put into the page when set."""
feed_urls = (('/feed_one', 'First Feed'),
('/feed_two', 'Second Feed'),)
doc = pq(jingo.render_to_string(self.request, self.template, {
'feeds': feed_urls}))
feeds = doc('link[type="application/atom+xml"]')
eq_(2, len(feeds))
eq_('First Feed', feeds[0].attrib['title'])
eq_('Second Feed', feeds[1].attrib['title'])
def test_readonly_attr(self):
html = jingo.render_to_string(self.request, self.template)
doc = pq(html)
eq_('false', doc('body')[0].attrib['data-readonly'])
@mock.patch.object(settings._wrapped, 'READ_ONLY', True)
def test_readonly_login_link_disabled(self):
"""Ensure that login/register links are hidden in READ_ONLY."""
html = jingo.render_to_string(self.request, self.template)
doc = pq(html)
eq_(0, len(doc('a.sign-out, a.sign-in')))
@mock.patch.object(settings._wrapped, 'READ_ONLY', False)
def test_not_readonly_login_link_enabled(self):
"""Ensure that login/register links are visible in not READ_ONLY."""
html = jingo.render_to_string(self.request, self.template)
doc = pq(html)
assert len(doc('a.sign-out, a.sign-in')) > 0
class ErrorListTests(MockRequestTests):
"""Tests for errorlist.html, which renders form validation errors."""
def test_escaping(self):
"""Make sure we escape HTML entities, lest we court XSS errors."""
class MockForm(object):
errors = True
auto_id = 'id_'
def visible_fields(self):
return [{'errors': ['<"evil&ness-field">']}]
def non_field_errors(self):
return ['<"evil&ness-non-field">']
source = ("""{% from "layout/errorlist.html" import errorlist %}"""
"""{{ errorlist(form) }}""")
html = jingo.render_to_string(self.request,
jingo.env.from_string(source),
{'form': MockForm()})
assert '<"evil&ness' not in html
assert '<"evil&ness-field">' in html
assert '<"evil&ness-non-field">' in html
| bsd-3-clause |
Pallokala/ansible-modules-core | cloud/docker/docker.py | 9 | 59482 | #!/usr/bin/python
# (c) 2013, Cove Schneider
# (c) 2014, Joshua Conner <[email protected]>
# (c) 2014, Pavel Antonov <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
DOCUMENTATION = '''
---
module: docker
version_added: "1.4"
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
options:
count:
description:
- Number of matching containers that should be in the desired state.
default: 1
image:
description:
- Container image used to match and launch containers.
required: true
pull:
description:
- Control when container images are updated from the C(docker_url) registry.
If "missing," images will be pulled only when missing from the host;
if '"always," the registry will be checked for a newer version of the
image' each time the task executes.
default: missing
choices: [ "missing", "always" ]
version_added: "1.9"
command:
description:
- Command used to match and launch containers.
default: null
name:
description:
- Name used to match and uniquely name launched containers. Explicit names
are used to uniquely identify a single container or to link among
containers. Mutually exclusive with a "count" other than "1".
default: null
version_added: "1.5"
ports:
description:
- "List containing private to public port mapping specification.
Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
      where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host interface.
The container ports need to be exposed either in the Dockerfile or via the C(expose) option."
default: null
version_added: "1.5"
expose:
description:
- List of additional container ports to expose for port mappings or links.
If the port is already exposed using EXPOSE in a Dockerfile, you don't
need to expose it again.
default: null
version_added: "1.5"
publish_all_ports:
description:
- Publish all exposed ports to the host interfaces.
default: false
version_added: "1.5"
volumes:
description:
- List of volumes to mount within the container using docker CLI-style
- 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".'
default: null
volumes_from:
description:
- List of names of containers to mount volumes from.
default: null
links:
description:
- List of other containers to link within this container with an optional
- 'alias. Use docker CLI-style syntax: C(redis:myredis).'
default: null
version_added: "1.5"
log_driver:
description:
- You can specify a different logging driver for the container than for the daemon.
"json-file" Default logging driver for Docker. Writes JSON messages to file.
docker logs command is available only for this logging driver.
"none" disables any logging for the container. docker logs won't be available with this driver.
"syslog" Syslog logging driver for Docker. Writes log messages to syslog.
docker logs command is not available for this logging driver.
If not defined explicitly, the Docker daemon's default ("json-file") will apply.
Requires docker >= 1.6.0.
required: false
default: json-file
choices:
- json-file
- none
- syslog
version_added: "2.0"
memory_limit:
description:
- RAM allocated to the container as a number of bytes or as a human-readable
string like "512MB". Leave as "0" to specify no limit.
default: 0
docker_url:
description:
- URL of the host running the docker daemon. This will default to the env
var DOCKER_HOST if unspecified.
default: ${DOCKER_HOST} or unix://var/run/docker.sock
use_tls:
description:
- Whether to use tls to connect to the docker server. "no" means not to
use tls (and ignore any other tls related parameters). "encrypt" means
to use tls to encrypt the connection to the server. "verify" means to
also verify that the server's certificate is valid for the server
(this both verifies the certificate against the CA and that the
certificate was issued for that host. If this is unspecified, tls will
only be used if one of the other tls options require it.
choices: [ "no", "encrypt", "verify" ]
version_added: "1.9"
tls_client_cert:
description:
- Path to the PEM-encoded certificate used to authenticate docker client.
If specified tls_client_key must be valid
default: ${DOCKER_CERT_PATH}/cert.pem
version_added: "1.9"
tls_client_key:
description:
- Path to the PEM-encoded key used to authenticate docker client. If
specified tls_client_cert must be valid
default: ${DOCKER_CERT_PATH}/key.pem
version_added: "1.9"
tls_ca_cert:
description:
- Path to a PEM-encoded certificate authority to secure the Docker connection.
This has no effect if use_tls is encrypt.
default: ${DOCKER_CERT_PATH}/ca.pem
version_added: "1.9"
tls_hostname:
description:
- A hostname to check matches what's supplied in the docker server's
certificate. If unspecified, the hostname is taken from the docker_url.
default: Taken from docker_url
version_added: "1.9"
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as
specified by docker-py.
default: docker-py default remote API version
version_added: "1.8"
username:
description:
- Remote API username.
default: null
password:
description:
- Remote API password.
default: null
email:
description:
- Remote API email.
default: null
hostname:
description:
- Container hostname.
default: null
domainname:
description:
- Container domain name.
default: null
env:
description:
- Pass a dict of environment variables to the container.
default: null
dns:
description:
- List of custom DNS servers for the container.
required: false
default: null
detach:
description:
- Enable detached mode to leave the container running in background. If
disabled, fail unless the process exits cleanly.
default: true
state:
description:
- Assert the container's desired state. "present" only asserts that the
matching containers exist. "started" asserts that the matching
containers both exist and are running, but takes no action if any
configuration has changed. "reloaded" (added in Ansible 1.9) asserts that all matching
containers are running and restarts any that have any images or
configuration out of date. "restarted" unconditionally restarts (or
        starts) the matching containers. "stopped" and "killed" stop and kill
        all matching containers. "absent" stops and then removes any matching
containers.
required: false
default: started
choices:
- present
- started
- reloaded
- restarted
- stopped
- killed
- absent
privileged:
description:
- Whether the container should run in privileged mode or not.
default: false
lxc_conf:
description:
- LXC configuration parameters, such as C(lxc.aa_profile:unconfined).
default: null
stdin_open:
description:
- Keep stdin open after a container is launched.
default: false
version_added: "1.6"
tty:
description:
- Allocate a pseudo-tty within the container.
default: false
version_added: "1.6"
net:
description:
- 'Network mode for the launched container: bridge, none, container:<name|id>'
- or host. Requires docker >= 0.11.
default: false
version_added: "1.8"
pid:
description:
- Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.5.0
required: false
default: None
aliases: []
version_added: "1.9"
registry:
description:
- Remote registry URL to pull images from.
default: DockerHub
aliases: []
version_added: "1.8"
restart_policy:
description:
- Container restart policy.
choices: ["no", "on-failure", "always"]
default: null
version_added: "1.9"
restart_policy_retry:
description:
- Maximum number of times to restart a container. Leave as "0" for unlimited
retries.
default: 0
version_added: "1.9"
extra_hosts:
description:
- Dict of custom host-to-IP mappings to be defined in the container
insecure_registry:
description:
- Use insecure private registry by HTTP instead of HTTPS. Needed for
docker-py >= 0.5.0.
default: false
version_added: "1.9"
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Ash Wilson (@smashwilson)"
requirements:
- "python >= 2.6"
- "docker-py >= 0.3.0"
- "The docker server >= 0.10.0"
'''
EXAMPLES = '''
# Containers are matched either by name (if provided) or by an exact match of
# the image they were launched with and the command they're running. The module
# can accept either a name to target a container uniquely, or a count to operate
# on multiple containers at once when it makes sense to do so.
# Ensure that a data container with the name "mydata" exists. If no container
# by this name exists, it will be created, but not started.
- name: data container
docker:
name: mydata
image: busybox
state: present
volumes:
- /data
# Ensure that a Redis server is running, using the volume from the data
# container. Expose the default Redis port.
- name: redis container
docker:
name: myredis
image: redis
command: redis-server --appendonly yes
state: started
expose:
- 6379
volumes_from:
- mydata
# Ensure that a container of your application server is running. This will:
# - pull the latest version of your application image from DockerHub.
# - ensure that a container is running with the specified name and exact image.
# If any configuration options have changed, the existing container will be
# stopped and removed, and a new one will be launched in its place.
# - link this container to the existing redis container launched above with
# an alias.
# - bind TCP port 9000 within the container to port 8080 on all interfaces
# on the host.
# - bind UDP port 9001 within the container to port 8081 on the host, only
# listening on localhost.
# - set the environment variable SECRET_KEY to "ssssh".
- name: application container
docker:
name: myapplication
image: someuser/appimage
state: reloaded
pull: always
links:
- "myredis:aliasedredis"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: ssssh
# Ensure that exactly five containers of another server are running with this
# exact image and command. If fewer than five are running, more will be launched;
# if more are running, the excess will be stopped.
- name: load-balanced containers
docker:
state: reloaded
count: 5
image: someuser/anotherappimage
command: sleep 1d
# Unconditionally restart a service container. This may be useful within a
# handler, for example.
- name: application service
docker:
name: myservice
image: someuser/serviceimage
state: restarted
# Stop all containers running the specified image.
- name: obsolete container
docker:
image: someuser/oldandbusted
state: stopped
# Stop and remove a container with the specified name.
- name: obsolete container
docker:
name: ohno
image: someuser/oldandbusted
state: absent
'''
HAS_DOCKER_PY = True
import sys
import json
import os
import shlex
from urlparse import urlparse
try:
import docker.client
import docker.utils
from requests.exceptions import RequestException
except ImportError:
HAS_DOCKER_PY = False
if HAS_DOCKER_PY:
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
try:
# docker-py 1.2+
import docker.constants
DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
except (ImportError, AttributeError):
# docker-py less than 1.2
DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
def _human_to_bytes(number):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if isinstance(number, int):
return number
if number[-1] == suffixes[0] and number[-2].isdigit():
return number[:-1]
i = 1
for each in suffixes[1:]:
if number[-len(each):] == suffixes[i]:
return int(number[:-len(each)]) * (1024 ** i)
i = i + 1
raise ValueError('Could not convert %s to integer' % (number,))
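# Editor's note (illustrative, not part of the original module): expected
# conversions performed by _human_to_bytes().
#   _human_to_bytes(1024)    -> 1024 (ints pass through unchanged)
#   _human_to_bytes('512MB') -> 512 * 1024 ** 2 == 536870912
#   _human_to_bytes('2GB')   -> 2 * 1024 ** 3 == 2147483648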
def _ansible_facts(container_list):
return {"docker_containers": container_list}
def _docker_id_quirk(inspect):
# XXX: some quirk in docker
if 'ID' in inspect:
inspect['Id'] = inspect['ID']
del inspect['ID']
return inspect
def get_split_image_tag(image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag
if ':' in resource:
resource, tag = resource.split(':', 1)
if registry:
resource = '/'.join((registry, resource))
else:
tag = "latest"
resource = image
return resource, tag
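# Illustrative examples of get_split_image_tag (added for clarity; not part of
# the original module):
#   get_split_image_tag('redis')                 -> ('redis', 'latest')
#   get_split_image_tag('someuser/appimage:1.2') -> ('someuser/appimage', '1.2')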
def normalize_image(image):
"""
Normalize a Docker image name to include the implied :latest tag.
"""
return ":".join(get_split_image_tag(image))
def is_running(container):
'''Return True if an inspected container is in a state we consider "running."'''
    return container['State']['Running'] and not container['State'].get('Ghost', False)
def get_docker_py_versioninfo():
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module but not until
# after 0.3.0 was pushed to pypi. If it's there, use it.
version = []
for part in docker.__version__.split('.'):
try:
version.append(int(part))
except ValueError:
for idx, char in enumerate(part):
if not char.isdigit():
nondigit = part[idx:]
digit = part[:idx]
if digit:
version.append(int(digit))
if nondigit:
version.append(nondigit)
elif hasattr(docker.Client, '_get_raw_response_socket'):
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
version = (0, 3, 0)
else:
# This is untrue but this module does not function with a version less
# than 0.3.0 so it's okay to lie here.
version = (0,)
return tuple(version)
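# For example (illustrative): with docker-py 1.2.3 installed,
# get_docker_py_versioninfo() returns (1, 2, 3).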
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
versioninfo = get_docker_py_versioninfo()
if versioninfo < (0, 3, 0):
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
class DockerManager(object):
counters = dict(
created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0
)
reload_reasons = []
_capabilities = set()
# Map optional parameters to minimum (docker-py version, server APIVersion)
# docker-py version is a tuple of ints because we have to compare them
# server APIVersion is passed to a docker-py function that takes strings
_cap_ver_req = {
'dns': ((0, 3, 0), '1.10'),
'volumes_from': ((0, 3, 0), '1.10'),
'restart_policy': ((0, 5, 0), '1.14'),
'extra_hosts': ((0, 7, 0), '1.3.1'),
'pid': ((1, 0, 0), '1.17'),
'log_driver': ((1, 2, 0), '1.18'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0')
}
def __init__(self, module):
self.module = module
self.binds = None
self.volumes = None
if self.module.params.get('volumes'):
self.binds = {}
self.volumes = []
vols = self.module.params.get('volumes')
for vol in vols:
parts = vol.split(":")
# regular volume
if len(parts) == 1:
self.volumes.append(parts[0])
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
elif 2 <= len(parts) <= 3:
# default to read-write
ro = False
# with supplied bind mode
if len(parts) == 3:
if parts[2] not in ['ro', 'rw']:
                            self.module.fail_json(msg='bind mode must be either "ro" or "rw"')
else:
ro = parts[2] == 'ro'
self.binds[parts[0]] = {'bind': parts[1], 'ro': ro }
else:
                    self.module.fail_json(msg='volumes support 1 to 3 colon-separated fields')
self.lxc_conf = None
if self.module.params.get('lxc_conf'):
self.lxc_conf = []
options = self.module.params.get('lxc_conf')
for option in options:
parts = option.split(':', 1)
self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
self.exposed_ports = None
if self.module.params.get('expose'):
self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
self.port_bindings = None
if self.module.params.get('ports'):
self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
self.links = None
if self.module.params.get('links'):
self.links = self.get_links(self.module.params.get('links'))
self.env = self.module.params.get('env', None)
# Connect to the docker server using any configured host and TLS settings.
env_host = os.getenv('DOCKER_HOST')
env_docker_verify = os.getenv('DOCKER_TLS_VERIFY')
env_cert_path = os.getenv('DOCKER_CERT_PATH')
env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME')
docker_url = module.params.get('docker_url')
if not docker_url:
if env_host:
docker_url = env_host
else:
docker_url = 'unix://var/run/docker.sock'
docker_api_version = module.params.get('docker_api_version')
tls_client_cert = module.params.get('tls_client_cert', None)
if not tls_client_cert and env_cert_path:
tls_client_cert = os.path.join(env_cert_path, 'cert.pem')
tls_client_key = module.params.get('tls_client_key', None)
if not tls_client_key and env_cert_path:
tls_client_key = os.path.join(env_cert_path, 'key.pem')
tls_ca_cert = module.params.get('tls_ca_cert')
if not tls_ca_cert and env_cert_path:
tls_ca_cert = os.path.join(env_cert_path, 'ca.pem')
tls_hostname = module.params.get('tls_hostname')
if tls_hostname is None:
if env_docker_hostname:
tls_hostname = env_docker_hostname
else:
parsed_url = urlparse(docker_url)
if ':' in parsed_url.netloc:
tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
                    tls_hostname = parsed_url.netloc
if not tls_hostname:
tls_hostname = True
# use_tls can be one of four values:
# no: Do not use tls
# encrypt: Use tls. We may do client auth. We will not verify the server
# verify: Use tls. We may do client auth. We will verify the server
# None: Only use tls if the parameters for client auth were specified
# or tls_ca_cert (which requests verifying the server with
# a specific ca certificate)
use_tls = module.params.get('use_tls')
if use_tls is None and env_docker_verify is not None:
use_tls = 'verify'
tls_config = None
if use_tls != 'no':
params = {}
# Setup client auth
if tls_client_cert and tls_client_key:
params['client_cert'] = (tls_client_cert, tls_client_key)
# We're allowed to verify the connection to the server
if use_tls == 'verify' or (use_tls is None and tls_ca_cert):
if tls_ca_cert:
params['ca_cert'] = tls_ca_cert
params['verify'] = True
params['assert_hostname'] = tls_hostname
else:
params['verify'] = True
params['assert_hostname'] = tls_hostname
elif use_tls == 'encrypt':
params['verify'] = False
if params:
# See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296
docker_url = docker_url.replace('tcp://', 'https://')
tls_config = docker.tls.TLSConfig(**params)
self.client = docker.Client(base_url=docker_url,
version=docker_api_version,
tls=tls_config)
self.docker_py_versioninfo = get_docker_py_versioninfo()
def _check_capabilities(self):
"""
Create a list of available capabilities
"""
api_version = self.client.version()['ApiVersion']
for cap, req_vers in self._cap_ver_req.items():
if (self.docker_py_versioninfo >= req_vers[0] and
docker.utils.compare_version(req_vers[1], api_version) >= 0):
self._capabilities.add(cap)
def ensure_capability(self, capability, fail=True):
"""
Some of the functionality this ansible module implements are only
available in newer versions of docker. Ensure that the capability
is available here.
If fail is set to False then return True or False depending on whether
we have the capability. Otherwise, simply fail and exit the module if
we lack the capability.
"""
if not self._capabilities:
self._check_capabilities()
if capability in self._capabilities:
return True
if not fail:
return False
api_version = self.client.version()['ApiVersion']
self.module.fail_json(msg='Specifying the `%s` parameter requires'
' docker-py: %s, docker server apiversion %s; found'
' docker-py: %s, server: %s' % (
capability,
'.'.join(map(str, self._cap_ver_req[capability][0])),
self._cap_ver_req[capability][1],
'.'.join(map(str, self.docker_py_versioninfo)),
api_version))
def get_links(self, links):
"""
        Parse the links passed. If a link is specified without an alias, use the link name as the alias.
"""
processed_links = {}
for link in links:
parsed_link = link.split(':', 1)
if(len(parsed_link) == 2):
processed_links[parsed_link[0]] = parsed_link[1]
else:
processed_links[parsed_link[0]] = parsed_link[0]
return processed_links
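    # Illustrative example (added for clarity; not part of the original module):
    #   get_links(['myredis:aliasedredis', 'db']) -> {'myredis': 'aliasedredis', 'db': 'db'}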
def get_exposed_ports(self, expose_list):
"""
Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
"""
if expose_list:
exposed = []
for port in expose_list:
port = str(port).strip()
if port.endswith('/tcp') or port.endswith('/udp'):
port_with_proto = tuple(port.split('/'))
else:
# assume tcp protocol if not specified
port_with_proto = (port, 'tcp')
exposed.append(port_with_proto)
return exposed
else:
return None
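    # Illustrative example (added for clarity; not part of the original module):
    #   get_exposed_ports(['6379', '53/udp']) -> [('6379', 'tcp'), ('53', 'udp')]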
def get_port_bindings(self, ports):
"""
Parse the `ports` string into a port bindings dict for the `start_container` call.
"""
binds = {}
for port in ports:
# ports could potentially be an array like [80, 443], so we make sure they're strings
# before splitting
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
# Bind `container_port` of the container to a dynamically
# allocated TCP port on all available interfaces of the host
# machine.
bind = ('0.0.0.0',)
elif p_len == 2:
# Bind `container_port` of the container to port `parts[0]` on
# all available interfaces of the host machine.
bind = ('0.0.0.0', int(parts[0]))
elif p_len == 3:
# Bind `container_port` of the container to port `parts[1]` on
# IP `parts[0]` of the host machine. If `parts[1]` empty bind
# to a dynamically allocated port of IP `parts[0]`.
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
# append to list if it already exists
old_bind.append(bind)
else:
# otherwise create list that contains the old and new binds
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
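    # Illustrative example (added for clarity; not part of the original module),
    # matching the playbook examples above:
    #   get_port_bindings(['8080:9000', '127.0.0.1:8081:9001/udp'])
    #     -> {9000: ('0.0.0.0', 8080), '9001/udp': ('127.0.0.1', 8081)}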
def get_summary_message(self):
'''
Generate a message that briefly describes the actions taken by this
task, in English.
'''
parts = []
for k, v in self.counters.iteritems():
if v == 0:
continue
if v == 1:
plural = ""
else:
plural = "s"
parts.append("%s %d container%s" % (k, v, plural))
if parts:
return ", ".join(parts) + "."
else:
return "No action taken."
def get_reload_reason_message(self):
'''
Generate a message describing why any reloaded containers were reloaded.
'''
if self.reload_reasons:
return ", ".join(self.reload_reasons)
else:
return None
def get_summary_counters_msg(self):
msg = ""
for k, v in self.counters.iteritems():
msg = msg + "%s %d " % (k, v)
return msg
def increment_counter(self, name):
self.counters[name] = self.counters[name] + 1
def has_changed(self):
for k, v in self.counters.iteritems():
if v > 0:
return True
return False
def get_inspect_image(self):
try:
return self.client.inspect_image(self.module.params.get('image'))
except DockerAPIError as e:
if e.response.status_code == 404:
return None
else:
raise e
def get_image_repo_tags(self):
image, tag = get_split_image_tag(self.module.params.get('image'))
if tag is None:
tag = 'latest'
resource = '%s:%s' % (image, tag)
for image in self.client.images(name=image):
if resource in image.get('RepoTags', []):
return image['RepoTags']
return []
def get_inspect_containers(self, containers):
inspect = []
for i in containers:
details = self.client.inspect_container(i['Id'])
details = _docker_id_quirk(details)
inspect.append(details)
return inspect
def get_differing_containers(self):
"""
Inspect all matching, running containers, and return those that were
started with parameters that differ from the ones that are provided
during this module run. A list containing the differing
containers will be returned, and a short string describing the specific
difference encountered in each container will be appended to
reload_reasons.
This generates the set of containers that need to be stopped and
started with new parameters with state=reloaded.
"""
running = self.get_running_containers()
current = self.get_inspect_containers(running)
image = self.get_inspect_image()
if image is None:
# The image isn't present. Assume that we're about to pull a new
# tag and *everything* will be restarted.
#
# This will give false positives if you untag an image on the host
# and there's nothing more to pull.
return current
differing = []
for container in current:
# IMAGE
# Compare the image by ID rather than name, so that containers
# will be restarted when new versions of an existing image are
# pulled.
if container['Image'] != image['Id']:
self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id']))
differing.append(container)
continue
# COMMAND
expected_command = self.module.params.get('command')
if expected_command:
expected_command = shlex.split(expected_command)
actual_command = container["Config"]["Cmd"]
if actual_command != expected_command:
self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command))
differing.append(container)
continue
# EXPOSED PORTS
expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys())
for p in (self.exposed_ports or []):
expected_exposed_ports.add("/".join(p))
actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys())
if actually_exposed_ports != expected_exposed_ports:
self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports))
differing.append(container)
continue
# VOLUMES
expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys())
if self.volumes:
                expected_volume_keys.update(self.volumes)
actual_volume_keys = set((container['Config']['Volumes'] or {}).keys())
if actual_volume_keys != expected_volume_keys:
self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys))
differing.append(container)
continue
# MEM_LIMIT
try:
expected_mem = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
actual_mem = container['Config']['Memory']
if expected_mem and actual_mem != expected_mem:
self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem))
differing.append(container)
continue
# ENVIRONMENT
# actual_env is likely to include environment variables injected by
# the Dockerfile.
expected_env = {}
for image_env in image['ContainerConfig']['Env'] or []:
name, value = image_env.split('=', 1)
expected_env[name] = value
if self.env:
for name, value in self.env.iteritems():
expected_env[name] = str(value)
actual_env = {}
for container_env in container['Config']['Env'] or []:
name, value = container_env.split('=', 1)
actual_env[name] = value
if actual_env != expected_env:
                # The full environment difference is included in the reload reason; this can be verbose.
self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env))
differing.append(container)
continue
# HOSTNAME
expected_hostname = self.module.params.get('hostname')
actual_hostname = container['Config']['Hostname']
if expected_hostname and actual_hostname != expected_hostname:
self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname))
differing.append(container)
continue
# DOMAINNAME
expected_domainname = self.module.params.get('domainname')
actual_domainname = container['Config']['Domainname']
if expected_domainname and actual_domainname != expected_domainname:
self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname))
differing.append(container)
continue
# DETACH
# We don't have to check for undetached containers. If it wasn't
# detached, it would have stopped before the playbook continued!
# NAME
# We also don't have to check name, because this is one of the
# criteria that's used to determine which container(s) match in
# the first place.
# STDIN_OPEN
expected_stdin_open = self.module.params.get('stdin_open')
actual_stdin_open = container['Config']['AttachStdin']
if actual_stdin_open != expected_stdin_open:
self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open))
differing.append(container)
continue
# TTY
expected_tty = self.module.params.get('tty')
actual_tty = container['Config']['Tty']
if actual_tty != expected_tty:
self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty))
differing.append(container)
continue
# -- "start" call differences --
# LXC_CONF
if self.lxc_conf:
expected_lxc = set(self.lxc_conf)
actual_lxc = set(container['HostConfig']['LxcConf'] or [])
if actual_lxc != expected_lxc:
self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc))
differing.append(container)
continue
# BINDS
expected_binds = set()
if self.binds:
for host_path, config in self.binds.iteritems():
if isinstance(config, dict):
container_path = config['bind']
if config['ro']:
mode = 'ro'
else:
mode = 'rw'
else:
container_path = config
mode = 'rw'
expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode))
actual_binds = set()
for bind in (container['HostConfig']['Binds'] or []):
if len(bind.split(':')) == 2:
actual_binds.add(bind + ":rw")
else:
actual_binds.add(bind)
if actual_binds != expected_binds:
self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds))
differing.append(container)
continue
# PORT BINDINGS
expected_bound_ports = {}
if self.port_bindings:
for container_port, config in self.port_bindings.iteritems():
if isinstance(container_port, int):
container_port = "{0}/tcp".format(container_port)
if len(config) == 1:
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for hostip, hostport in config:
expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
actual_bound_ports = container['HostConfig']['PortBindings'] or {}
if actual_bound_ports != expected_bound_ports:
self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports))
differing.append(container)
continue
# PUBLISHING ALL PORTS
# What we really care about is the set of ports that is actually
# published. That should be caught above.
# PRIVILEGED
expected_privileged = self.module.params.get('privileged')
actual_privileged = container['HostConfig']['Privileged']
if actual_privileged != expected_privileged:
self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged))
differing.append(container)
continue
# LINKS
expected_links = set()
for link, alias in (self.links or {}).iteritems():
expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias))
actual_links = set(container['HostConfig']['Links'] or [])
if actual_links != expected_links:
self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links))
differing.append(container)
continue
# NETWORK MODE
expected_netmode = self.module.params.get('net') or 'bridge'
actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge'
if actual_netmode != expected_netmode:
self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode))
differing.append(container)
continue
# DNS
expected_dns = set(self.module.params.get('dns') or [])
actual_dns = set(container['HostConfig']['Dns'] or [])
if actual_dns != expected_dns:
self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns))
differing.append(container)
continue
# VOLUMES_FROM
expected_volumes_from = set(self.module.params.get('volumes_from') or [])
actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or [])
            if actual_volumes_from != expected_volumes_from:
                self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from))
                differing.append(container)
                continue
# LOG_DRIVER
            if self.ensure_capability('log_driver', False):
expected_log_driver = self.module.params.get('log_driver') or 'json-file'
actual_log_driver = container['HostConfig']['LogConfig']['Type']
if actual_log_driver != expected_log_driver:
self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))
differing.append(container)
continue
return differing
def get_deployed_containers(self):
"""
Return any matching containers that are already present.
"""
command = self.module.params.get('command')
if command:
command = command.strip()
name = self.module.params.get('name')
if name and not name.startswith('/'):
name = '/' + name
deployed = []
# "images" will be a collection of equivalent "name:tag" image names
# that map to the same Docker image.
inspected = self.get_inspect_image()
if inspected:
repo_tags = self.get_image_repo_tags()
else:
repo_tags = [normalize_image(self.module.params.get('image'))]
for container in self.client.containers(all=True):
details = None
if name:
name_list = container.get('Names')
if name_list is None:
name_list = []
matches = name in name_list
else:
details = self.client.inspect_container(container['Id'])
details = _docker_id_quirk(details)
running_image = normalize_image(details['Config']['Image'])
running_command = container['Command'].strip()
image_matches = running_image in repo_tags
# if a container has an entrypoint, `command` will actually equal
# '{} {}'.format(entrypoint, command)
command_matches = (not command or running_command.endswith(command))
matches = image_matches and command_matches
if matches:
if not details:
details = self.client.inspect_container(container['Id'])
details = _docker_id_quirk(details)
deployed.append(details)
return deployed
def get_running_containers(self):
return [c for c in self.get_deployed_containers() if is_running(c)]
def pull_image(self):
extra_params = {}
if self.module.params.get('insecure_registry'):
if self.ensure_capability('insecure_registry', fail=False):
extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
resource = self.module.params.get('image')
image, tag = get_split_image_tag(resource)
if self.module.params.get('username'):
try:
self.client.login(
self.module.params.get('username'),
password=self.module.params.get('password'),
email=self.module.params.get('email'),
registry=self.module.params.get('registry')
)
except Exception as e:
self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e))
try:
changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params))
try:
last = changes[-1]
except IndexError:
last = '{}'
status = json.loads(last).get('status', '')
if status.startswith('Status: Image is up to date for'):
# Image is already up to date. Don't increment the counter.
pass
elif (status.startswith('Status: Downloaded newer image for') or
status.startswith('Download complete')):
# Image was updated. Increment the pull counter.
self.increment_counter('pulled')
else:
# Unrecognized status string.
self.module.fail_json(msg="Unrecognized status from pull.", status=status, changes=changes)
except Exception as e:
self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
def create_host_config(self):
params = {
'lxc_conf': self.lxc_conf,
'binds': self.binds,
'port_bindings': self.port_bindings,
'publish_all_ports': self.module.params.get('publish_all_ports'),
'privileged': self.module.params.get('privileged'),
'links': self.links,
'network_mode': self.module.params.get('net'),
}
optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy',
'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']
if optionals['volumes_from'] is not None:
self.ensure_capability('volumes_from')
params['volumes_from'] = optionals['volumes_from']
if optionals['restart_policy'] is not None:
self.ensure_capability('restart_policy')
params['restart_policy'] = { 'Name': optionals['restart_policy'] }
if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
if optionals['pid'] is not None:
self.ensure_capability('pid')
params['pid_mode'] = optionals['pid']
if optionals['extra_hosts'] is not None:
self.ensure_capability('extra_hosts')
params['extra_hosts'] = optionals['extra_hosts']
if optionals['log_driver'] is not None:
self.ensure_capability('log_driver')
log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
log_config.type = optionals['log_driver']
params['log_config'] = log_config
return docker.utils.create_host_config(**params)
def create_containers(self, count=1):
try:
mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
params = {'image': self.module.params.get('image'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'mem_limit': mem_limit,
'environment': self.env,
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
'host_config': self.create_host_config(),
}
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except:
self.pull_image()
containers = do_create(count, params)
return containers
def start_containers(self, containers):
for i in containers:
self.client.start(i)
self.increment_counter('started')
if not self.module.params.get('detach'):
status = self.client.wait(i['Id'])
if status != 0:
output = self.client.logs(i['Id'], stdout=True, stderr=True,
stream=False, timestamps=False)
self.module.fail_json(status=status, msg=output)
def stop_containers(self, containers):
for i in containers:
self.client.stop(i['Id'])
self.increment_counter('stopped')
return [self.client.wait(i['Id']) for i in containers]
def remove_containers(self, containers):
for i in containers:
self.client.remove_container(i['Id'])
self.increment_counter('removed')
def kill_containers(self, containers):
for i in containers:
self.client.kill(i['Id'])
self.increment_counter('killed')
def restart_containers(self, containers):
for i in containers:
self.client.restart(i['Id'])
self.increment_counter('restarted')
class ContainerSet:
def __init__(self, manager):
self.manager = manager
self.running = []
self.deployed = []
self.changed = []
def refresh(self):
'''
Update our view of the matching containers from the Docker daemon.
'''
self.deployed = self.manager.get_deployed_containers()
self.running = [c for c in self.deployed if is_running(c)]
def notice_changed(self, containers):
'''
Record a collection of containers as "changed".
'''
self.changed.extend(containers)
def present(manager, containers, count, name):
'''Ensure that exactly `count` matching containers exist in any state.'''
containers.refresh()
delta = count - len(containers.deployed)
if delta > 0:
containers.notice_changed(manager.create_containers(delta))
if delta < 0:
# If both running and stopped containers exist, remove
# stopped containers first.
containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy)))
to_stop = []
to_remove = []
for c in containers.deployed[0:-delta]:
if is_running(c):
to_stop.append(c)
to_remove.append(c)
manager.stop_containers(to_stop)
manager.remove_containers(to_remove)
containers.notice_changed(to_remove)
def started(manager, containers, count, name):
'''Ensure that exactly `count` matching containers exist and are running.'''
containers.refresh()
delta = count - len(containers.running)
if delta > 0:
if name and containers.deployed:
# A stopped container exists with the requested name.
# Clean it up before attempting to start a new one.
manager.remove_containers(containers.deployed)
created = manager.create_containers(delta)
manager.start_containers(created)
containers.notice_changed(created)
if delta < 0:
excess = containers.running[0:-delta]
manager.stop_containers(excess)
manager.remove_containers(excess)
containers.notice_changed(excess)
def reloaded(manager, containers, count, name):
'''
Ensure that exactly `count` matching containers exist and are
running. If any associated settings have been changed (volumes,
ports or so on), restart those containers.
'''
containers.refresh()
for container in manager.get_differing_containers():
manager.stop_containers([container])
manager.remove_containers([container])
started(manager, containers, count, name)
def restarted(manager, containers, count, name):
'''
Ensure that exactly `count` matching containers exist and are
running. Unconditionally restart any that were already running.
'''
containers.refresh()
manager.restart_containers(containers.running)
started(manager, containers, count, name)
def stopped(manager, containers, count, name):
'''Stop any matching containers that are running.'''
containers.refresh()
manager.stop_containers(containers.running)
containers.notice_changed(containers.running)
def killed(manager, containers, count, name):
'''Kill any matching containers that are running.'''
containers.refresh()
manager.kill_containers(containers.running)
containers.notice_changed(containers.running)
def absent(manager, containers, count, name):
'''Stop and remove any matching containers.'''
containers.refresh()
manager.stop_containers(containers.running)
manager.remove_containers(containers.deployed)
containers.notice_changed(containers.deployed)
def main():
module = AnsibleModule(
argument_spec = dict(
count = dict(default=1),
image = dict(required=True),
pull = dict(required=False, default='missing', choices=['missing', 'always']),
command = dict(required=False, default=None),
expose = dict(required=False, default=None, type='list'),
ports = dict(required=False, default=None, type='list'),
publish_all_ports = dict(default=False, type='bool'),
volumes = dict(default=None, type='list'),
volumes_from = dict(default=None),
links = dict(default=None, type='list'),
memory_limit = dict(default=0),
memory_swap = dict(default=0),
docker_url = dict(),
use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']),
tls_client_cert = dict(required=False, default=None, type='str'),
tls_client_key = dict(required=False, default=None, type='str'),
tls_ca_cert = dict(required=False, default=None, type='str'),
tls_hostname = dict(required=False, type='str', default=None),
docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'),
username = dict(default=None),
password = dict(),
email = dict(),
registry = dict(),
hostname = dict(default=None),
domainname = dict(default=None),
env = dict(type='dict'),
dns = dict(),
detach = dict(default=True, type='bool'),
state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']),
restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']),
restart_policy_retry = dict(default=0, type='int'),
extra_hosts = dict(type='dict'),
debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'),
stdin_open = dict(default=False, type='bool'),
tty = dict(default=False, type='bool'),
lxc_conf = dict(default=None, type='list'),
name = dict(default=None),
net = dict(default=None),
pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'),
log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
),
required_together = (
['tls_client_cert', 'tls_client_key'],
),
)
check_dependencies(module)
try:
manager = DockerManager(module)
count = int(module.params.get('count'))
name = module.params.get('name')
pull = module.params.get('pull')
state = module.params.get('state')
if state == 'running':
# Renamed running to started in 1.9
state = 'started'
if count < 0:
module.fail_json(msg="Count must be greater than zero")
if count > 1 and name:
module.fail_json(msg="Count and name must not be used together")
# Explicitly pull new container images, if requested.
# Do this before noticing running and deployed containers so that the image names will differ
# if a newer image has been pulled.
if pull == "always":
manager.pull_image()
containers = ContainerSet(manager)
if state == 'present':
present(manager, containers, count, name)
elif state == 'started':
started(manager, containers, count, name)
elif state == 'reloaded':
reloaded(manager, containers, count, name)
elif state == 'restarted':
restarted(manager, containers, count, name)
elif state == 'stopped':
stopped(manager, containers, count, name)
elif state == 'killed':
killed(manager, containers, count, name)
elif state == 'absent':
absent(manager, containers, count, name)
else:
module.fail_json(msg='Unrecognized state %s. Must be one of: '
'present; started; reloaded; restarted; '
'stopped; killed; absent.' % state)
module.exit_json(changed=manager.has_changed(),
msg=manager.get_summary_message(),
summary=manager.counters,
containers=containers.changed,
reload_reasons=manager.get_reload_reason_message(),
ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))
except DockerAPIError as e:
module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)
except RequestException as e:
module.fail_json(changed=manager.has_changed(), msg=repr(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
SaranyaKarthikeyan/boto | boto/services/sonofmmm.py | 170 | 3498 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.services.service import Service
from boto.services.message import ServiceMessage
import os
import mimetypes
class SonOfMMM(Service):
def __init__(self, config_file=None):
super(SonOfMMM, self).__init__(config_file)
self.log_file = '%s.log' % self.instance_id
self.log_path = os.path.join(self.working_dir, self.log_file)
boto.set_file_logger(self.name, self.log_path)
if self.sd.has_option('ffmpeg_args'):
self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
else:
self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
self.output_mimetype = self.sd.get('output_mimetype')
if self.sd.has_option('output_ext'):
self.output_ext = self.sd.get('output_ext')
else:
self.output_ext = mimetypes.guess_extension(self.output_mimetype)
self.output_bucket = self.sd.get_obj('output_bucket')
self.input_bucket = self.sd.get_obj('input_bucket')
# check to see if there are any messages queue
# if not, create messages for all files in input_bucket
m = self.input_queue.read(1)
if not m:
self.queue_files()
def queue_files(self):
boto.log.info('Queueing files from %s' % self.input_bucket.name)
for key in self.input_bucket:
boto.log.info('Queueing %s' % key.name)
m = ServiceMessage()
if self.output_bucket:
d = {'OutputBucket' : self.output_bucket.name}
else:
d = None
m.for_key(key, d)
self.input_queue.write(m)
def process_file(self, in_file_name, msg):
base, ext = os.path.splitext(in_file_name)
out_file_name = os.path.join(self.working_dir,
base+self.output_ext)
command = self.command % (in_file_name, out_file_name)
boto.log.info('running:\n%s' % command)
status = self.run(command)
if status == 0:
return [(out_file_name, self.output_mimetype)]
else:
return []
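    # Illustrative behaviour (added for clarity; assumes the default command
    # template above and output_ext == '.mp4'):
    #   process_file('/work/clip.avi', msg) runs
    #   '/usr/local/bin/ffmpeg -y -i /work/clip.avi /work/clip.mp4' and returns
    #   [('/work/clip.mp4', self.output_mimetype)] when ffmpeg exits with 0.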
def shutdown(self):
if os.path.isfile(self.log_path):
if self.output_bucket:
key = self.output_bucket.new_key(self.log_file)
key.set_contents_from_filename(self.log_path)
super(SonOfMMM, self).shutdown()
| mit |
geekboxzone/lollipop_external_chromium_org_third_party_webrtc | build/extra_gitignore.py | 40 | 1443 | #!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
""" Adds extra patterns to the root .gitignore file.
Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""
import os
import sys
MODIFY_STRING = '# The following added by %s\n'
def main(argv):
if not argv[1]:
# Special case; do nothing.
return 0
modify_string = MODIFY_STRING % argv[0]
gitignore_file = os.path.dirname(argv[0]) + '/../../.gitignore'
lines = open(gitignore_file, 'r').readlines()
for i, line in enumerate(lines):
# Look for modify_string in the file to ensure we don't append the extra
# patterns more than once.
if line == modify_string:
lines = lines[:i]
break
lines.append(modify_string)
f = open(gitignore_file, 'w')
f.write(''.join(lines))
f.write(open(argv[1], 'r').read())
f.close()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
dlu-ch/dlb | test/dlb_contrib/test_clike.py | 1 | 11229 | # SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <[email protected]>
import testenv # also sets up module search paths
import dlb.fs
import dlb_contrib.clike
import os
import textwrap
import unittest
class RegexTest(unittest.TestCase):
def test_simple_identifier(self):
self.assertTrue(dlb_contrib.clike.SIMPLE_IDENTIFIER_REGEX.match('_a1Z'))
self.assertFalse(dlb_contrib.clike.SIMPLE_IDENTIFIER_REGEX.match(''))
self.assertFalse(dlb_contrib.clike.SIMPLE_IDENTIFIER_REGEX.match('1a'))
self.assertFalse(dlb_contrib.clike.SIMPLE_IDENTIFIER_REGEX.match('a::b'))
self.assertTrue(dlb_contrib.clike.SIMPLE_IDENTIFIER_REGEX.match('a' * 100))
def test_identifier(self):
self.assertTrue(dlb_contrib.clike.IDENTIFIER_REGEX.match('_a1Z\\u1a3F\\UA234567f'))
self.assertFalse(dlb_contrib.clike.IDENTIFIER_REGEX.match('\\u1a3'))
self.assertFalse(dlb_contrib.clike.IDENTIFIER_REGEX.match('\\UA234567'))
def test_portable_c_identifier(self):
self.assertTrue(dlb_contrib.clike.PORTABLE_C_IDENTIFIER_REGEX.match('a' * 31))
self.assertFalse(dlb_contrib.clike.PORTABLE_C_IDENTIFIER_REGEX.match('a' * 32))
def test_macro(self):
self.assertTrue(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z()'))
self.assertTrue(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z(x)'))
self.assertTrue(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z(...)'))
self.assertTrue(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z(x, y, ...)'))
self.assertTrue(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z( x , y , ... )'))
self.assertEqual('_a1Z',
dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z(x, y, ...)').group('name'))
self.assertEqual('x, y, ...',
dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z(x, y, ...)').group('arguments'))
self.assertFalse(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z'))
self.assertFalse(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z ()'))
self.assertFalse(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z('))
self.assertFalse(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z( ..., x )'))
self.assertFalse(dlb_contrib.clike.FUNCTIONLIKE_MACRO_REGEX.match('_a1Z( x, ..., y )'))
class StringLiteralFromBytesTest(unittest.TestCase):
def test_typical_is_unchanged(self):
self.assertEqual('""', dlb_contrib.clike.string_literal_from_bytes(b''))
self.assertEqual('"abc 42!"', dlb_contrib.clike.string_literal_from_bytes(b'abc 42!'))
def test_non_printable_and_quote_is_replaced(self):
self.assertEqual('"a\\x0A\\x22\\x60\\x5Cx"', dlb_contrib.clike.string_literal_from_bytes(b'a\n"`\\x'))
def test_contains_no_hexdigit_after_escape(self):
self.assertEqual('"a\\x0A" "b\\x00" "F\\x22" "b"', dlb_contrib.clike.string_literal_from_bytes(b'a\nb\0F"b'))
def test_contains_no_trigraph(self):
s = dlb_contrib.clike.string_literal_from_bytes(b'a??=b')
self.assertNotIn('??', s)
def test_break_long_into_several_lines(self):
s = dlb_contrib.clike.string_literal_from_bytes(b'.' * 10, 5)
self.assertEqual('"..."\n"..."\n"..."\n"."', s)
s = dlb_contrib.clike.string_literal_from_bytes(b'.' * 3, 0)
self.assertEqual('"."\n"."\n"."', s)
s = dlb_contrib.clike.string_literal_from_bytes(b'abc\n', 9)
self.assertEqual('"abc\\x0A"', s)
s = dlb_contrib.clike.string_literal_from_bytes(b'abc\n', 8)
self.assertEqual('"abc"\n"\\x0A"', s)
s = dlb_contrib.clike.string_literal_from_bytes(b'abc\nd', 13)
self.assertEqual('"abc\\x0A" "d"', s)
s = dlb_contrib.clike.string_literal_from_bytes(b'abc\nd', 12)
self.assertEqual('"abc\\x0A"\n"d"', s)
s = dlb_contrib.clike.string_literal_from_bytes(b'abc\nd', 9)
self.assertEqual('"abc\\x0A"\n"d"', s)
class IdentifierLikeFromStringTest(unittest.TestCase):
def test_only_basecharacters(self):
s = dlb_contrib.clike.identifier_like_from_string('')
self.assertEqual('', s)
s = dlb_contrib.clike.identifier_like_from_string('abc')
self.assertEqual('abc', s)
s = dlb_contrib.clike.identifier_like_from_string('abC_Def_')
self.assertEqual('abC_Def__', s)
def test_mixed(self):
s = dlb_contrib.clike.identifier_like_from_string('Säu\\li')
self.assertEqual('S_u_li_08V02I', s)
s = dlb_contrib.clike.identifier_like_from_string('test.h')
self.assertEqual('test_h_06D', s)
def test_special_slash(self):
s = dlb_contrib.clike.identifier_like_from_string('src/generated/version.h', sep='/')
self.assertEqual('src_generated_version_h_26D', s)
class IdentifierFromPathTest(unittest.TestCase):
def test_fails_for_absolute(self):
with self.assertRaises(ValueError):
dlb_contrib.clike.identifier_from_path(dlb.fs.Path('/a'))
def test_dot(self):
s = dlb_contrib.clike.identifier_from_path(dlb.fs.Path('.'))
self.assertEqual('___06D', s)
def test_dotdot(self):
s = dlb_contrib.clike.identifier_from_path(dlb.fs.Path('../'))
self.assertEqual('____06D06D', s)
def test_typical_source_file_path(self):
s = dlb_contrib.clike.identifier_from_path(dlb.fs.Path('src'))
self.assertEqual('SRC', s)
s = dlb_contrib.clike.identifier_from_path(dlb.fs.Path('src/io/print.h'))
self.assertEqual('SRC_IO_PRINT_H_26D', s)
def test_typical_file_path(self):
s = dlb_contrib.clike.identifier_from_path(dlb.fs.Path('s-rc/i_o/p+rint.h'))
self.assertEqual('S_RC_I_O_P_RINT_H_05D15I13D06D', s)
def test_untypical_file_path(self):
s = dlb_contrib.clike.identifier_from_path(dlb.fs.Path('säü\\li'))
self.assertEqual('S___LI_06S00V02I', s)
class GenerateHeaderFileTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_scenario1(self):
class GenerateVersionFile(dlb_contrib.clike.GenerateHeaderFile):
COMPONENT_ID = 42
VERSION = '1.2.3c4-dev2+a2d66f1d?'
PATH_COMPONENTS_TO_STRIP = 1
def write_content(self, file):
version = dlb_contrib.clike.string_literal_from_bytes(self.VERSION.encode())
file.write(f'\n#define COMPONENT_{self.COMPONENT_ID}_WD_VERSION {version}\n')
os.makedirs(os.path.join('src', 'Generated'))
with dlb.ex.Context():
GenerateVersionFile(output_file='src/Generated/Version.h').start()
with open(os.path.join('src', 'Generated', 'Version.h'), 'r') as f:
content = f.read()
expected_content = \
"""
// This file was created automatically.
// Do not modify it manually.
#ifndef GENERATED_VERSION_H_16D_
#define GENERATED_VERSION_H_16D_
#define COMPONENT_42_WD_VERSION "1.2.3c4-dev2+a2d66f1d?"
#endif // GENERATED_VERSION_H_16D_
"""
self.assertEqual(textwrap.dedent(expected_content).lstrip(), content)
def test_creates_include_guard(self):
with dlb.ex.Context():
dlb_contrib.clike.GenerateHeaderFile(output_file='Version.h').start()
with open('Version.h', 'r') as f:
content = f.read()
expected_content = \
"""
// This file was created automatically.
// Do not modify it manually.
#ifndef VERSION_H_06D_
#define VERSION_H_06D_
#endif // VERSION_H_06D_
"""
self.assertEqual(textwrap.dedent(expected_content).lstrip(), content)
def test_fails_for_nonidentifier_guard(self):
class GenerateVersionFile(dlb_contrib.clike.GenerateHeaderFile):
INCLUDE_GUARD_PREFIX = '1'
def write_content(self, file):
pass
with self.assertRaises(ValueError):
with dlb.ex.Context():
GenerateVersionFile(file='empty.h').start()
def test_fails_for_too_many_stripped_components(self):
class GenerateVersionFile(dlb_contrib.clike.GenerateHeaderFile):
PATH_COMPONENTS_TO_STRIP = 1
def write_content(self, file):
pass
with self.assertRaises(ValueError):
with dlb.ex.Context():
GenerateVersionFile(file='empty.h').start()
class CCompileCheckTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_fails(self):
with dlb.ex.Context():
with self.assertRaises(NotImplementedError):
self.assertFalse(dlb_contrib.clike.ClikeCompiler.does_source_compile(''))
def test_restores_configuration_on_failure(self):
old_level_redo_reason = dlb.cf.level.redo_reason
old_level_redo_start = dlb.cf.level.redo_start
old_execute_helper_inherits_files_by_default = dlb.cf.execute_helper_inherits_files_by_default
dlb.cf.level.redo_reason = dlb.di.INFO
dlb.cf.level.redo_start = dlb.di.ERROR
dlb.cf.execute_helper_inherits_files_by_default = True
try:
with dlb.ex.Context():
with self.assertRaises(NotImplementedError):
dlb_contrib.clike.ClikeCompiler.does_source_compile('')
self.assertEqual(dlb.di.INFO, dlb.cf.level.redo_reason)
self.assertEqual(dlb.di.ERROR, dlb.cf.level.redo_start)
self.assertIs(True, dlb.cf.execute_helper_inherits_files_by_default)
finally:
dlb.cf.level.redo_reason = old_level_redo_reason
dlb.cf.level.redo_start = old_level_redo_start
dlb.cf.execute_helper_inherits_files_by_default = old_execute_helper_inherits_files_by_default
class CConstantConditionCheckTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_fails_for_valid(self):
with dlb.ex.Context():
with self.assertRaises(NotImplementedError):
dlb_contrib.clike.ClikeCompiler.check_constant_expression('1 < 2')
def test_fails_for_byte_string_argument(self):
with dlb.ex.Context():
with self.assertRaises(TypeError) as cm:
# noinspection PyTypeChecker
dlb_contrib.clike.ClikeCompiler.check_constant_expression(b'')
msg = "'constant_expression' must be a str"
self.assertEqual(msg, str(cm.exception))
with dlb.ex.Context():
with self.assertRaises(TypeError) as cm:
# noinspection PyTypeChecker
dlb_contrib.clike.ClikeCompiler.check_constant_expression('', preamble=b'')
msg = "'preamble' must be a str"
self.assertEqual(msg, str(cm.exception))
class CSizeOfCheckTest(testenv.TemporaryWorkingDirectoryTestCase):
def test_fails(self):
with dlb.ex.Context():
with self.assertRaises(NotImplementedError):
dlb_contrib.clike.ClikeCompiler.get_size_of('int')
| gpl-3.0 |
nrhine1/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to fluctuate together during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
worldforge/atlas-cpp | src/Atlas-Python/tests/test_server.py | 1 | 3383 | #test TCP/IP server
#Copyright 2002 by AIR-IX SUUNNITTELU/Ahiplan Oy
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import test_objects
import importlib
importlib.reload(test_objects)
from test_objects import *
import os, time
import pdb
#pdb.set_trace()
print_debug = 0
if print_debug:
print("test_server:")
import atlas.util.debug
atlas.util.debug.debug_flag = 0
import atlas
import atlas.transport.TCP.server
import atlas.transport.TCP.client
from atlas.transport.connection import args2address
class TestServer(atlas.transport.TCP.server.SocketServer):
def loop(self):
self.waiting = 1
while self.waiting or self.clients2send:
self.process_communication()
self.idle()
if print_debug:
print(self.waiting, self.clients2send)
class TestConnection(atlas.transport.TCP.server.TcpClient):
def talk_op(self, op):
if print_debug:
print(repr(str(op)))
self.server.str_op = str(op)
reply = atlas.Operation("sound",
atlas.Operation("talk",
atlas.Object(say="Hello %s!" % op.from_),
from_=op.from_),
from_=op.from_
)
self.reply_operation(op, reply)
self.server.waiting = 0
class TestClient(atlas.transport.TCP.client.TcpClient):
def sound_op(self, op):
self.waiting = 0
self.str_op = str(op)
if print_debug:
print(repr(str(op)))
def loop(self):
op = atlas.Operation("talk",
atlas.Object(say="Hello world!"),
from_="Joe")
self.send_operation(op)
self.waiting = 1
while self.waiting:
time.sleep(0.1)
self.process_communication()
tserver = TestServer("test server", args2address(sys.argv), TestConnection)
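# Fork the test: the child process runs the client and talks to the server,
# while the parent runs the server loop; each side asserts on the serialized
# Atlas operation it received before exiting.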
res = os.fork()
if res==0:
tclient = TestClient("test client", args2address(sys.argv))
tclient.connect_and_negotiate()
tclient.loop()
assert(tclient.str_op=='{\012\011arg: {\012\011\011arg: {\012\011\011\011say: "Hello Joe!"\012\011\011},\012\011\011from: "Joe",\012\011\011objtype: "op",\012\011\011parents: ["talk"]\012\011},\012\011from: "Joe",\012\011objtype: "op",\012\011parents: ["sound"]\012}\012')
if print_debug:
print("client exits")
else:
tserver.loop()
assert(tserver.str_op=='{\012\011arg: {\012\011\011say: "Hello world!"\012\011},\012\011from: "Joe",\012\011objtype: "op",\012\011parents: ["talk"]\012}\012')
if print_debug:
print("server exits")
os.wait()
| lgpl-2.1 |
abridgett/boto | tests/unit/emr/test_instance_group_args.py | 112 | 2056 | #!/usr/bin/env python
# Author: Charlie Schluting <[email protected]>
#
# Test to ensure initialization of InstanceGroup object emits appropriate errors
# if bidprice is not specified, but allows float, int, Decimal.
from decimal import Decimal
from tests.compat import unittest
from boto.emr.instance_group import InstanceGroup
class TestInstanceGroupArgs(unittest.TestCase):
def test_bidprice_missing_spot(self):
"""
Test InstanceGroup init raises ValueError when market==spot and
bidprice is not specified.
"""
with self.assertRaisesRegexp(ValueError, 'bidprice must be specified'):
InstanceGroup(1, 'MASTER', 'm1.small',
'SPOT', 'master')
def test_bidprice_missing_ondemand(self):
"""
Test InstanceGroup init accepts a missing bidprice arg, when market is
ON_DEMAND.
"""
instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
'ON_DEMAND', 'master')
def test_bidprice_Decimal(self):
"""
Test InstanceGroup init works with bidprice type = Decimal.
"""
instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
'SPOT', 'master', bidprice=Decimal(1.10))
self.assertEquals('1.10', instance_group.bidprice[:4])
def test_bidprice_float(self):
"""
Test InstanceGroup init works with bidprice type = float.
"""
instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
'SPOT', 'master', bidprice=1.1)
self.assertEquals('1.1', instance_group.bidprice)
def test_bidprice_string(self):
"""
Test InstanceGroup init works with bidprice type = string.
"""
instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
'SPOT', 'master', bidprice='1.1')
self.assertEquals('1.1', instance_group.bidprice)
if __name__ == "__main__":
unittest.main()
| mit |
SuporteCTRL/suitesaber | htdocs/site/site/bvs-mod/FCKeditor/editor/filemanager/connectors/py/upload.py | 44 | 3123 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
UploadFileCommandMixin,
BaseHttpMixin, BaseHtmlMixin):
def doResponse(self):
"Main function. Process the request, set headers and return a string as response."
# Check if this connector is disabled
if not(Config.Enabled):
return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
command = 'QuickUpload'
# The file type (from the QueryString, by default 'File').
resourceType = self.request.get('Type','File')
currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
# Check for invalid paths
if currentFolder is None:
return self.sendUploadResults(102, '', '', "")
# Check if it is an allowed command
if ( not command in Config.ConfigAllowedCommands ):
return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
if ( not resourceType in Config.ConfigAllowedTypes ):
return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
# Setup paths
self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
if not self.userFilesFolder: # no absolute path given (dangerous...)
self.userFilesFolder = mapServerPath(self.environ,
self.webUserFilesFolder)
# Ensure that the directory exists.
if not os.path.exists(self.userFilesFolder):
try:
self.createServerFolder( self.userFilesFolder )
except:
return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
# File upload doesn't have to return XML, so intercept here
return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
try:
# Create a Connector Instance
conn = FCKeditorQuickUpload()
data = conn.doResponse()
for header in conn.headers:
if not header is None:
print '%s: %s' % header
print
print data
except:
print "Content-Type: text/plain"
print
import cgi
cgi.print_exception()
| gpl-3.0 |
CyanogenMod/android_kernel_oneplus_msm8974 | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
40223210/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/_imp.py | 625 | 2115 | """(Extremely) low-level import machinery bits as used by importlib and imp."""
class __loader__(object):pass
def _fix_co_filename(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:_fix_co_filename'))
def acquire_lock(*args,**kw):
"""acquire_lock() -> None Acquires the interpreter's import lock for the current thread.
This lock should be used by import hooks to ensure thread-safety
when importing modules.
On platforms without threads, this function does nothing."""
pass #assume we are a platform without threads
#raise NotImplementedError("%s:not implemented" % ('_imp.py:acquire_lock'))
def extension_suffixes(*args,**kw):
"""extension_suffixes() -> list of strings Returns the list of file suffixes used to identify extension modules."""
return ['.pyd']
def get_frozen_object(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:get_frozen_object'))
def init_builtin(module,*args,**kw):
return __import__(module)
def init_frozen(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:init_frozen'))
def is_builtin(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_builtin'))
def is_frozen(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen'))
def is_frozen_package(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:is_frozen_package'))
def load_dynamic(*args,**kw):
raise NotImplementedError("%s:not implemented" % ('_imp.py:load_dynamic'))
def lock_held(*args,**kw):
"""lock_held() -> boolean Return True if the import lock is currently held, else False.
On platforms without threads, return False."""
return False
#raise NotImplementedError("%s:not implemented" % ('_imp.py:lock_held'))
def release_lock(*args,**kw):
"""release_lock() -> None Release the interpreter's import lock.
On platforms without threads, this function does nothing."""
pass #assume no threads
#raise NotImplementedError("%s:not implemented" % ('_imp.py:release_lock'))
| gpl-3.0 |
cchurch/ansible | lib/ansible/modules/network/meraki/meraki_admin.py | 5 | 17464 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_admin
short_description: Manage administrators in the Meraki cloud
version_added: '2.6'
description:
- Allows for creation, management, and visibility into administrators within Meraki.
options:
name:
description:
- Name of the dashboard administrator.
- Required when creating a new administrator.
type: str
email:
description:
- Email address for the dashboard administrator.
- Email cannot be updated.
- Required when creating or editing an administrator.
type: str
org_access:
description:
- Privileges assigned to the administrator in the organization.
aliases: [ orgAccess ]
choices: [ full, none, read-only ]
type: str
tags:
description:
- Tags the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
- If C(none) is specified, C(network) or C(tags) must be specified.
suboptions:
tag:
description:
- Object tag which privileges should be assigned.
type: str
access:
description:
- The privilege of the dashboard administrator for the tag.
type: str
networks:
description:
- List of networks the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
suboptions:
id:
description:
- Network ID for which administrator should have privileges assigned.
type: str
access:
description:
- The privilege of the dashboard administrator on the network.
- Valid options are C(full), C(read-only), or C(none).
type: str
state:
description:
- Create or modify, or delete an organization
- If C(state) is C(absent), name takes priority over email if both are specified.
choices: [ absent, present, query ]
required: true
type: str
org_name:
description:
- Name of organization.
- Used when C(name) should refer to another object.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
aliases: ['organization']
type: str
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query information about all administrators associated to the organization
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Query information about a single administrator by name
meraki_admin:
auth_key: abc12345
org_id: 12345
state: query
name: Jane Doe
- name: Query information about a single administrator by email
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: query
email: [email protected]
- name: Create new administrator with organization access
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: [email protected]
- name: Create a new administrator with organization access
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: [email protected]
- name: Revoke access to an organization for an administrator
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: absent
email: [email protected]
- name: Create a new administrator with full access to two tags
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
orgAccess: read-only
email: [email protected]
tags:
- tag: tenant
access: full
- tag: corporate
access: read-only
- name: Create a new administrator with full access to a network
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
orgAccess: read-only
email: [email protected]
networks:
- id: N_12345
access: full
'''
RETURN = r'''
data:
description: List of administrators.
returned: success
type: complex
contains:
email:
description: Email address of administrator.
returned: success
type: str
sample: [email protected]
id:
description: Unique identification number of administrator.
returned: success
type: str
sample: 1234567890
name:
description: Given name of administrator.
returned: success
type: str
sample: John Doe
account_status:
description: Status of account.
returned: success
type: str
sample: ok
two_factor_auth_enabled:
description: Enabled state of two-factor authentication for administrator.
returned: success
type: bool
sample: false
has_api_key:
description: Defines whether administrator has an API assigned to their account.
returned: success
type: bool
sample: false
last_active:
description: Date and time of time the administrator was active within Dashboard.
returned: success
type: str
sample: 2019-01-28 14:58:56 -0800
networks:
description: List of networks administrator has access on.
returned: success
type: complex
contains:
id:
description: The network ID.
returned: when network permissions are set
type: str
sample: N_0123456789
access:
description: Access level of administrator. Options are 'full', 'read-only', or 'none'.
returned: when network permissions are set
type: str
sample: read-only
tags:
description: Tags the administrator has access on.
returned: success
type: complex
contains:
tag:
description: Tag name.
returned: when tag permissions are set
type: str
sample: production
access:
description: Access level of administrator. Options are 'full', 'read-only', or 'none'.
returned: when tag permissions are set
type: str
sample: full
org_access:
description: The privilege of the dashboard administrator on the organization. Options are 'full', 'read-only', or 'none'.
returned: success
type: str
sample: full
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
def get_admins(meraki, org_id):
admins = meraki.request(
meraki.construct_path(
'query',
function='admin',
org_id=org_id
),
method='GET'
)
if meraki.status == 200:
return admins
def get_admin_id(meraki, data, name=None, email=None):
admin_id = None
for a in data:
if meraki.params['name'] is not None:
if meraki.params['name'] == a['name']:
if admin_id is not None:
meraki.fail_json(msg='There are multiple administrators with the same name')
else:
admin_id = a['id']
elif meraki.params['email']:
if meraki.params['email'] == a['email']:
return a['id']
if admin_id is None:
meraki.fail_json(msg='No admin_id found')
return admin_id
def get_admin(meraki, data, id):
for a in data:
if a['id'] == id:
return a
meraki.fail_json(msg='No admin found by specified name or email')
def find_admin(meraki, data, email):
for a in data:
if a['email'] == email:
return a
return None
def delete_admin(meraki, org_id, admin_id):
path = meraki.construct_path('revoke', 'admin', org_id=org_id) + admin_id
r = meraki.request(path,
method='DELETE'
)
if meraki.status == 204:
return r
def network_factory(meraki, networks, nets):
networks = json.loads(networks)
networks_new = []
for n in networks:
networks_new.append({'id': meraki.get_net_id(org_name=meraki.params['org_name'],
net_name=n['network'],
data=nets),
'access': n['access']
})
return networks_new
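# create_admin doubles as an idempotent "ensure" operation: if the email is
# not registered yet the admin is created with a POST, otherwise the existing
# record is diffed against the requested payload and updated with a PUT only
# when a change is actually required.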
def create_admin(meraki, org_id, name, email):
payload = dict()
payload['name'] = name
payload['email'] = email
is_admin_existing = find_admin(meraki, get_admins(meraki, org_id), email)
if meraki.params['org_access'] is not None:
payload['orgAccess'] = meraki.params['org_access']
if meraki.params['tags'] is not None:
payload['tags'] = json.loads(meraki.params['tags'])
if meraki.params['networks'] is not None:
nets = meraki.get_nets(org_id=org_id)
networks = network_factory(meraki, meraki.params['networks'], nets)
payload['networks'] = networks
if is_admin_existing is None: # Create new admin
if meraki.module.check_mode is True:
meraki.result['data'] = payload
meraki.result['changed'] = True
meraki.exit_json(**meraki.result)
path = meraki.construct_path('create', function='admin', org_id=org_id)
r = meraki.request(path,
method='POST',
payload=json.dumps(payload)
)
if meraki.status == 201:
meraki.result['changed'] = True
return r
elif is_admin_existing is not None: # Update existing admin
if not meraki.params['tags']:
payload['tags'] = []
if not meraki.params['networks']:
payload['networks'] = []
if meraki.is_update_required(is_admin_existing, payload) is True:
if meraki.module.check_mode is True:
diff = recursive_diff(is_admin_existing, payload)
is_admin_existing.update(payload)
meraki.result['diff'] = {'before': diff[0],
'after': diff[1],
}
meraki.result['changed'] = True
meraki.result['data'] = payload
meraki.exit_json(**meraki.result)
path = meraki.construct_path('update', function='admin', org_id=org_id) + is_admin_existing['id']
r = meraki.request(path,
method='PUT',
payload=json.dumps(payload)
)
if meraki.status == 200:
meraki.result['changed'] = True
return r
else:
meraki.result['data'] = is_admin_existing
if meraki.module.check_mode is True:
meraki.result['data'] = payload
meraki.exit_json(**meraki.result)
return -1
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['present', 'query', 'absent'], required=True),
name=dict(type='str'),
email=dict(type='str'),
org_access=dict(type='str', aliases=['orgAccess'], choices=['full', 'read-only', 'none']),
tags=dict(type='json'),
networks=dict(type='json'),
org_name=dict(type='str', aliases=['organization']),
org_id=dict(type='str'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='admin')
meraki.function = 'admin'
meraki.params['follow_redirects'] = 'all'
query_urls = {'admin': '/organizations/{org_id}/admins',
}
create_urls = {'admin': '/organizations/{org_id}/admins',
}
update_urls = {'admin': '/organizations/{org_id}/admins/',
}
revoke_urls = {'admin': '/organizations/{org_id}/admins/',
}
meraki.url_catalog['query'] = query_urls
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['revoke'] = revoke_urls
try:
meraki.params['auth_key'] = os.environ['MERAKI_KEY']
except KeyError:
pass
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# execute checks for argument completeness
if meraki.params['state'] == 'query':
meraki.mutually_exclusive = ['name', 'email']
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id required')
meraki.required_if = [(['state'], ['absent'], ['email']),
]
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not meraki.params['org_id']:
org_id = meraki.get_org_id(meraki.params['org_name'])
if meraki.params['state'] == 'query':
admins = get_admins(meraki, org_id)
if not meraki.params['name'] and not meraki.params['email']: # Return all admins for org
meraki.result['data'] = admins
if meraki.params['name'] is not None: # Return a single admin for org
admin_id = get_admin_id(meraki, admins, name=meraki.params['name'])
meraki.result['data'] = admin_id
admin = get_admin(meraki, admins, admin_id)
meraki.result['data'] = admin
elif meraki.params['email'] is not None:
admin_id = get_admin_id(meraki, admins, email=meraki.params['email'])
meraki.result['data'] = admin_id
admin = get_admin(meraki, admins, admin_id)
meraki.result['data'] = admin
elif meraki.params['state'] == 'present':
r = create_admin(meraki,
org_id,
meraki.params['name'],
meraki.params['email'],
)
if r != -1:
meraki.result['data'] = r
elif meraki.params['state'] == 'absent':
if meraki.module.check_mode is True:
meraki.result['data'] = {}
meraki.result['changed'] = True
meraki.exit_json(**meraki.result)
admin_id = get_admin_id(meraki,
get_admins(meraki, org_id),
email=meraki.params['email']
)
r = delete_admin(meraki, org_id, admin_id)
if r != -1:
meraki.result['data'] = r
meraki.result['changed'] = True
# in the event of a successful module execution, you will want to
# simple AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
| gpl-3.0 |
knowmetools/km-api | km_api/know_me/signals.py | 1 | 2117 | import logging
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_email_auth.models import EmailAddress
from rest_email_auth.signals import user_registered
from know_me import models
logger = logging.getLogger(__name__)
@receiver(user_registered)
def create_km_user(user, **kwargs):
"""
Create a Know Me user for each registered user.
Each time a user registers, a Know Me user is automatically created
for them.
Args:
user:
The user who just registered.
"""
models.KMUser.objects.create(image=user.image, user=user)
logger.info("Created Know Me user for user %s", user)
@receiver(post_save, sender=EmailAddress)
def update_accessor(instance, **kwargs):
"""
Update accessors that have an email address but no user.
If the accessor has an email address only and that email is now
verified, the accessor is updated to point to the user who owns the
email address.
Args:
instance:
The email address that was just saved.
"""
logger.debug(
"Updating KMUserAccessor instances for email %s", instance.email
)
try:
accessor = models.KMUserAccessor.objects.get(
email=instance.email, user_with_access=None
)
except models.KMUserAccessor.DoesNotExist:
return
if instance.is_verified:
dupe_query = accessor.km_user.km_user_accessors.filter(
user_with_access=instance.user
)
if dupe_query.exists():
duplicate = dupe_query.get()
logger.warning(
"Deleting accessor linking %s and %s because the user has "
"been granted access through the email address %s",
instance.email,
"{} (ID: {})".format(accessor.km_user, accessor.km_user.id),
duplicate.email,
)
accessor.delete()
return
accessor.user_with_access = instance.user
accessor.save()
logger.info("Updated KMUserAccessor for email %s", instance.email)
| apache-2.0 |
projectcalico/calico-nova | nova/tests/unit/scheduler/test_scheduler_utils.py | 4 | 16257 | # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import notifications
from nova import objects
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
CONF = cfg.CONF
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
def setUp(self):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_build_request_spec_requeries_extra_specs(self, mock_get):
flavor = objects.Flavor(**test_flavor.fake_flavor)
flavor.extra_specs = {'hw:numa_cpus.1': '1985'}
instance = objects.Instance(id=0, uuid=uuid.uuid4().hex,
system_metadata={})
with mock.patch.object(instance, 'save'):
instance.set_flavor(flavor.obj_clone())
flavor.extra_specs = {'second': '2015', 'third': '1885'}
mock_get.return_value = flavor
request_spec = scheduler_utils.build_request_spec(self.context,
None,
[instance])
mock_get.assert_called_once_with(self.context,
flavor.flavorid)
self.assertEqual({'hw:numa_cpus.1': '1985',
'second': '2015',
'third': '1885'},
request_spec['instance_type']['extra_specs'])
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_build_request_spec_without_image(self, mock_get):
image = None
instance = {'uuid': 'fake-uuid'}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
mock_get.return_value = objects.Flavor(extra_specs={})
self.mox.StubOutWithMock(flavors, 'extract_flavor')
flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
self.mox.ReplayAll()
request_spec = scheduler_utils.build_request_spec(self.context, image,
[instance])
self.assertEqual({}, request_spec['image'])
@mock.patch.object(flavors, 'extract_flavor')
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
def test_build_request_spec_with_object(self, mock_get, extract_flavor):
instance_type = objects.Flavor(**test_flavor.fake_flavor)
instance = fake_instance.fake_instance_obj(self.context)
mock_get.return_value = objects.Flavor(extra_specs={})
extract_flavor.return_value = instance_type
request_spec = scheduler_utils.build_request_spec(self.context, None,
[instance])
self.assertIsInstance(request_spec['instance_properties'], dict)
def test_set_vm_state_and_notify(self):
expected_uuid = 'fake-uuid'
request_spec = dict(instance_properties=dict(uuid='other-uuid'))
updates = dict(vm_state='fake-vm-state')
service = 'fake-service'
method = 'fake-method'
exc_info = 'exc_info'
self.mox.StubOutWithMock(compute_utils,
'add_instance_fault_from_exc')
self.mox.StubOutWithMock(notifications, 'send_update')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.mox.StubOutWithMock(rpc, 'get_notifier')
notifier = self.mox.CreateMockAnything()
rpc.get_notifier(service).AndReturn(notifier)
old_ref = 'old_ref'
new_ref = 'new_ref'
inst_obj = 'inst_obj'
db.instance_update_and_get_original(
self.context, expected_uuid, updates,
columns_to_join=['system_metadata']).AndReturn((old_ref, new_ref))
notifications.send_update(self.context, old_ref, inst_obj,
service=service)
compute_utils.add_instance_fault_from_exc(
self.context,
new_ref, exc_info, mox.IsA(tuple))
payload = dict(request_spec=request_spec,
instance_properties=request_spec.get(
'instance_properties', {}),
instance_id=expected_uuid,
state='fake-vm-state',
method=method,
reason=exc_info)
event_type = '%s.%s' % (service, method)
notifier.error(self.context, event_type, payload)
self.mox.ReplayAll()
with mock.patch.object(objects.Instance, '_from_db_object',
return_value=inst_obj):
scheduler_utils.set_vm_state_and_notify(self.context,
expected_uuid,
service,
method,
updates,
exc_info,
request_spec,
db)
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
force_nodes=None):
if force_hosts is None:
force_hosts = []
if force_nodes is None:
force_nodes = []
if with_retry:
if not force_hosts and not force_nodes:
filter_properties = dict(retry=dict(hosts=[]))
else:
filter_properties = dict(force_hosts=force_hosts,
force_nodes=force_nodes)
else:
filter_properties = dict()
if host_state_obj:
class host_state(object):
host = 'fake-host'
nodename = 'fake-node'
limits = 'fake-limits'
else:
host_state = dict(host='fake-host',
nodename='fake-node',
limits='fake-limits')
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if with_retry and not force_hosts and not force_nodes:
# So we can check for 2 hosts
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
if force_hosts:
expected_limits = None
else:
expected_limits = 'fake-limits'
self.assertEqual(expected_limits,
filter_properties.get('limits'))
if with_retry and not force_hosts and not force_nodes:
self.assertEqual([['fake-host', 'fake-node'],
['fake-host', 'fake-node']],
filter_properties['retry']['hosts'])
else:
self.assertNotIn('retry', filter_properties)
def test_populate_filter_props(self):
self._test_populate_filter_props()
def test_populate_filter_props_host_dict(self):
self._test_populate_filter_props(host_state_obj=False)
def test_populate_filter_props_no_retry(self):
self._test_populate_filter_props(with_retry=False)
def test_populate_filter_props_force_hosts_no_retry(self):
self._test_populate_filter_props(force_hosts=['force-host'])
def test_populate_filter_props_force_nodes_no_retry(self):
self._test_populate_filter_props(force_nodes=['force-node'])
@mock.patch.object(scheduler_utils, '_max_attempts')
def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
_max_attempts.return_value = 2
msg = 'The exception text was preserved!'
filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
exc=[msg]))
nvh = self.assertRaises(exception.NoValidHost,
scheduler_utils.populate_retry,
filter_properties, 'fake-uuid')
# make sure 'msg' is a substring of the complete exception text
self.assertIn(msg, nvh.message)
def _check_parse_options(self, opts, sep, converter, expected):
good = scheduler_utils.parse_options(opts,
sep=sep,
converter=converter)
for item in expected:
self.assertIn(item, good)
def test_parse_options(self):
# check normal
self._check_parse_options(['foo=1', 'bar=-2.1'],
'=',
float,
[('foo', 1.0), ('bar', -2.1)])
# check convert error
self._check_parse_options(['foo=a1', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check separator missing
self._check_parse_options(['foo', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
# check key missing
self._check_parse_options(['=5', 'bar=-2.1'],
'=',
float,
[('bar', -2.1)])
def test_validate_filters_configured(self):
self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
return group
def _get_group_details(self, group, policy=None):
group_hosts = ['hostB']
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(
self.context, 'fake_uuid', group_hosts)
self.assertEqual(
(set(['hostA', 'hostB']), [policy]),
group_info)
def test_get_group_details(self):
for policy in ['affinity', 'anti-affinity']:
group = self._create_server_group(policy)
self._get_group_details(group, policy=policy)
def test_get_group_details_with_no_affinity_filters(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context,
'fake-uuid')
self.assertIsNone(group_info)
def test_get_group_details_with_no_instance_uuid(self):
self.flags(scheduler_default_filters=['fake'])
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
group_info = scheduler_utils._get_group_details(self.context, None)
self.assertIsNone(group_info)
def _get_group_details_with_filter_not_configured(self, policy):
wrong_filter = {
'affinity': 'ServerGroupAntiAffinityFilter',
'anti-affinity': 'ServerGroupAffinityFilter',
}
self.flags(scheduler_default_filters=[wrong_filter[policy]])
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
group = objects.InstanceGroup()
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
group.policies = [policy]
with contextlib.nested(
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
return_value=group),
mock.patch.object(objects.InstanceGroup, 'get_hosts',
return_value=['hostA']),
) as (get_group, get_hosts):
scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
scheduler_utils._SUPPORTS_AFFINITY = None
self.assertRaises(exception.NoValidHost,
scheduler_utils._get_group_details,
self.context, 'fake-uuid')
def test_get_group_details_with_filter_not_configured(self):
policies = ['anti-affinity', 'affinity']
for policy in policies:
self._get_group_details_with_filter_not_configured(policy)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_in_filter_properties(self, mock_ggd):
mock_ggd.return_value = scheduler_utils.GroupDetails(
hosts=set(['hostA', 'hostB']), policies=['policy'])
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
expected_filter_props = {'group_updated': True,
'group_hosts': set(['hostA', 'hostB']),
'group_policies': ['policy']}
self.assertEqual(expected_filter_props, filter_props)
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_no_group(self, mock_ggd):
mock_ggd.return_value = None
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
scheduler_utils.setup_instance_group(self.context, spec, filter_props)
mock_ggd.assert_called_once_with(self.context, 'fake-uuid',
['hostC'])
self.assertNotIn('group_updated', filter_props)
self.assertNotIn('group_policies', filter_props)
self.assertEqual(['hostC'], filter_props['group_hosts'])
@mock.patch.object(scheduler_utils, '_get_group_details')
def test_setup_instance_group_with_filter_not_configured(self, mock_ggd):
mock_ggd.side_effect = exception.NoValidHost(reason='whatever')
spec = {'instance_properties': {'uuid': 'fake-uuid'}}
filter_props = {'group_hosts': ['hostC']}
self.assertRaises(exception.NoValidHost,
scheduler_utils.setup_instance_group,
self.context, spec, filter_props)
| apache-2.0 |
printedheart/h2o-3 | py2/testdir_single_jvm/test_quant_cmp_uniform.py | 20 | 8751 | import unittest, time, sys, random, math, getpass
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_util
import h2o_print as h2p, h2o_exec as h2e, h2o_summ
from h2o_test import dump_json, verboseprint, OutputObj
# have to match the csv file?
# dtype=['string', 'float');
probsList = [0.001, 0.01, 0.1, 0.25, 0.33, 0.5, 0.66, 0.75, 0.9, 0.99, 0.999]
ROWS = 100000
# tweak this to check different results (compare to sort/numpy/scipy)
CHECK_PCTILE_INDEX = 10
assert CHECK_PCTILE_INDEX >=0
assert CHECK_PCTILE_INDEX < len(probsList)
CHECK_PCTILE = probsList[CHECK_PCTILE_INDEX]
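# write_syn_dataset fills every column of a row with the same uniform random
# value drawn from [expectedMin, expectedMax], so the expected per-column
# quantiles of the file are known in advance.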
def write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
expectedRange = (expectedMax - expectedMin)
for i in range(rowCount):
rowData = []
ri = expectedMin + (random.uniform(0,1) * expectedRange)
for j in range(colCount):
rowData.append(ri)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_quant_cmp_uniform(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(5*ROWS, 1, 'x.hex', 1, 20000, ['C1', 1.10, 5000.0, 10000.0, 15000.0, 20000.00]),
(5*ROWS, 1, 'x.hex', -5000, 0, ['C1', -5001.00, -3750.0, -2445, -1200.0, 99]),
(1*ROWS, 1, 'x.hex', -100000, 100000, ['C1', -100001.0, -50000.0, 1613.0, 50000.0, 100000.0]),
(1*ROWS, 1, 'x.hex', -1, 1, ['C1', -1.05, -0.48, 0.0087, 0.50, 1.00]),
(1*ROWS, 1, 'A.hex', 1, 100, ['C1', 1.05, 26.00, 51.00, 76.00, 100.0]),
(1*ROWS, 1, 'A.hex', -99, 99, ['C1', -99, -50.0, 0, 50.00, 99]),
(1*ROWS, 1, 'B.hex', 1, 10000, ['C1', 1.05, 2501.00, 5001.00, 7501.00, 10000.00]),
(1*ROWS, 1, 'B.hex', -100, 100, ['C1', -100.10, -50.0, 0.85, 51.7, 100.00]),
(1*ROWS, 1, 'C.hex', 1, 100000, ['C1', 1.05, 25002.00, 50002.00, 75002.00, 100000.00]),
(1*ROWS, 1, 'C.hex', -101, 101, ['C1', -100.10, -50.45, -1.18, 49.28, 100.00]),
]
timeoutSecs = 10
trial = 1
n = h2o.nodes[0]
lenNodes = len(h2o.nodes)
x = 0
timeoutSecs = 60
for (rowCount, colCount, hex_key, expectedMin, expectedMax, expected) in tryList:
# max error = half the bin size?
colname = expected[0]
maxDelta = ((expectedMax - expectedMin)/1000.0) / 2.0
# add 5% for fp errors?
maxDelta = 1.05 * maxDelta
SEEDPERFILE = random.randint(0, sys.maxint)
x += 1
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, expectedMin, expectedMax, SEEDPERFILE)
# need the full pathname when python parses the csv for numpy/sort
csvPathnameFull = h2i.find_folder_and_filename(None, csvPathname, returnFullPath=True)
#***************************
# Parse
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult, expectedNumRows=rowCount, expectedNumCols=colCount)
numRows = pA.numRows
numCols = pA.numCols
parse_key = pA.parse_key
# this guy can take json object as first thing, or re-read with key
iA = h2o_cmd.InspectObj(parse_key,
expectedNumRows=rowCount, expectedNumCols=colCount, expectedMissinglist=[])
#***************************
# Summary
co = h2o_cmd.runSummary(key=parse_key)
default_pctiles = co.default_pctiles
coList = [ co.base, len(co.bins), len(co.data), co.domain,
co.label, co.maxs, co.mean, co.mins, co.missing, co.ninfs, co.pctiles,
co.pinfs, co.precision, co.sigma, co.str_data, co.stride, co.type, co.zeros]
for c in coList:
print c
print "len(co.bins):", len(co.bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
print "FIX! hacking the co.pctiles because it's short by two"
summ_pctiles = [0] + co.pctiles + [0]
pt = h2o_util.twoDecimals(summ_pctiles)
mx = h2o_util.twoDecimals(co.maxs)
mn = h2o_util.twoDecimals(co.mins)
exp = h2o_util.twoDecimals(expected[1:])
print "co.label:", co.label, "co.pctiles (2 places):", pt
print "default_pctiles:", default_pctiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! we should do an exec and compare using the exec quantile too
h2p.green_print("min/25/50/75/max co.label:", co.label, "(2 places):",\
mn[0], pt[3], pt[5], pt[7], mx[0])
h2p.green_print("min/25/50/75/max co.label:", co.label, "(2 places):",\
exp[0], exp[1], exp[2], exp[3], exp[4])
#***************************
# Quantile
# the thresholds h2o used, should match what we expected
# using + here seems to result in an odd tuple..doesn't look right to h2o param
# so went with this. Could add '[' and ']' to the list first, before the join.
probsStr = "[%s]" % ",".join(map(str,probsList))
parameters = {
'model_id': "a.hex",
'training_frame': parse_key,
'validation_frame': parse_key,
'ignored_columns': None,
'probs': probsStr,
}
model_key = 'qhex'
bmResult = h2o.n0.build_model(
algo='quantile',
model_id=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=10)
bm = OutputObj(bmResult, 'bm')
msec = bm.jobs[0]['msec']
print "bm msec", msec
# quantile result is just a job result to a key
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0], 'model')
print "model.output:", model.output
print "model.output:['quantiles']", model.output['quantiles']
print "model.output:['iterations']", model.output['iterations']
print "model.output:['names']", model.output['names']
quantiles = model.output['quantiles'][0] # why is this a double array
iterations = model.output['iterations']
assert iterations == 11, iterations
print "quantiles: ", quantiles
print "iterations: ", iterations
# cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
# cmm = OutputObj(cmmResult, 'cmm')
# mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
# mm = OutputObj(mmResult, 'mm')
# prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
# pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
h2o_cmd.runStoreView()
trial += 1
# compare the last threshold
if colname!='':
# don't do for enums
# also get the median with a sort (h2o_summ.percentileOnSortedlist()
h2o_summ.quantile_comparisons(
csvPathnameFull,
col=0, # what col to extract from the csv
datatype='float',
quantile=CHECK_PCTILE,
# h2oSummary2=pctile[-1],
# h2oQuantilesApprox=result, # from exec
h2oExecQuantiles=quantiles[CHECK_PCTILE_INDEX],
)
h2o.nodes[0].remove_all_keys()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
Xero-Hige/PythonTDA | TP1/influence.py | 1 | 1916 | from graph import Graph
from collections import deque
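# Brand1 is a Brandes-style betweenness computation: a BFS from every source
# vertex counts shortest paths (o) and records predecessors (P), then the
# stack is unwound to accumulate each vertex's dependency (sigma) into its
# total influence score.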
def Brand1(fb_graph):
influence = {}
for vertex in fb_graph.vertex:
influence[vertex] = 0
for s in fb_graph.vertex:
stack = deque()
P = {}
o = {}
d = {}
sigma = {}
for vertex in fb_graph.vertex:
P[vertex] = []
o[vertex] = 0
d[vertex] = -1
sigma[vertex] = 0
o[s] = 1
d[s] = 0
queue = deque()
queue.append(s)
while len(queue) > 0:
v = queue.popleft()
stack.append(v)
for neighbour in fb_graph.get_vertex_neighbours(v):
# Was this vertex discovered for the first time?
if d[neighbour] < 0:
queue.append(neighbour)
d[neighbour] = d[v] + 1
# Is this a shortest path to neighbour via v?
if d[neighbour] == d[v] + 1:
o[neighbour] = o[neighbour] + o[v]
P[neighbour].append(v)
while len(stack) > 0:
w = stack.pop()
for v in P[w]:
sigma[v] = sigma[v] + o[v]/(float(o[w])) * (1 + sigma[w])
if w != s:
influence[w] = float(influence[w]) + float(sigma[w])
return influence
def decode(fb_graph,influences):
"""Genera una lista con tuplas nombre,influencia en base al diccionario
de influencias pasado como parametro, obteniendo los nombres correspondientes
a las id del mismo del grafo"""
result = []
for id,influence in influences.items():
name = fb_graph.get_vertex_data(id)
result.append((name,influence))
return result
def show_influence(fb_graph):
"""Calcula y muestra la influencia de cada actor dentro del grafo de facebook
en forma decreciente en base a su influencia"""
influences = Brand1(fb_graph)
influence_list = decode(fb_graph,influences)
influence_list.sort(key=lambda tup: tup[1],reverse=True)
for name,influence in influence_list:
if influence<0.01 and influence>0:
influence = 0.01
# The displayed influence is rounded, so this only serves to separate the cases that are 0 from those that are not
print '{0:<30s} Influencia: {1:>9.2f}'.format(name,influence)
| gpl-3.0 |
behzadnouri/scipy | scipy/weave/examples/fibonacci.py | 100 | 3980 | # Typical run:
# C:\home\eric\wrk\scipy\weave\examples>python fibonacci.py
# Recursively computing the first 30 fibonacci numbers:
# speed in python: 4.31599998474
# speed in c: 0.0499999523163
# speed up: 86.32
# Looping to compute the first 30 fibonacci numbers:
# speed in python: 0.000520999908447
# speed in c: 5.00000715256e-005
# speed up: 10.42
# fib(30) 832040 832040 832040 832040
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import ext_tools
def build_fibonacci():
""" Builds an extension module with fibonacci calculators.
"""
mod = ext_tools.ext_module('fibonacci_ext')
a = 1 # this is effectively a type declaration
# recursive fibonacci in C
fib_code = """
int fib1(int a)
{
if(a <= 2)
return 1;
else
return fib1(a-2) + fib1(a-1);
}
"""
ext_code = """
return_val = fib1(a);
"""
fib = ext_tools.ext_function('c_fib1',ext_code,['a'])
fib.customize.add_support_code(fib_code)
mod.add_function(fib)
# looping fibonacci in C
fib_code = """
int fib2( int a )
{
int last, next_to_last, result;
if( a <= 2 )
return 1;
last = next_to_last = 1;
for(int i = 2; i < a; i++ )
{
result = last + next_to_last;
next_to_last = last;
last = result;
}
return result;
}
"""
ext_code = """
return_val = fib2(a);
"""
fib = ext_tools.ext_function('c_fib2',ext_code,['a'])
fib.customize.add_support_code(fib_code)
mod.add_function(fib)
mod.compile()
try:
import fibonacci_ext
except ImportError:
build_fibonacci()
import fibonacci_ext
c_fib1 = fibonacci_ext.c_fib1
c_fib2 = fibonacci_ext.c_fib2
#################################################################
# This where it might normally end, but we've added some timings
# below. Recursive solutions are much slower, and C is 10-50x faster
# than equivalent in Python for this simple little routine
#
#################################################################
def py_fib1(a):
if a <= 2:
return 1
else:
return py_fib1(a-2) + py_fib1(a-1)
def py_fib2(a):
if a <= 2:
return 1
last = next_to_last = 1
for i in range(2,a):
result = last + next_to_last
next_to_last = last
last = result
return result
import time
def recurse_compare(n):
print('Recursively computing the first %d fibonacci numbers:' % n)
t1 = time.time()
for i in range(n):
py_fib1(i)
t2 = time.time()
py = t2 - t1
print(' speed in python:', t2 - t1)
# load into cache
c_fib1(i)
t1 = time.time()
for i in range(n):
c_fib1(i)
t2 = time.time()
print(' speed in c:',t2 - t1)
print(' speed up: %3.2f' % (py/(t2-t1)))
def loop_compare(m,n):
print('Looping to compute the first %d fibonacci numbers:' % n)
t1 = time.time()
for i in range(m):
for i in range(n):
py_fib2(i)
t2 = time.time()
py = (t2-t1)
print(' speed in python:', (t2 - t1)/m)
# load into cache
c_fib2(i)
t1 = time.time()
for i in range(m):
for i in range(n):
c_fib2(i)
t2 = time.time()
print(' speed in c:',(t2 - t1) / m)
print(' speed up: %3.2f' % (py/(t2-t1)))
if __name__ == "__main__":
n = 30
recurse_compare(n)
m = 1000
loop_compare(m,n)
print('fib(30)', c_fib1(30),py_fib1(30),c_fib2(30),py_fib2(30))
| bsd-3-clause |
Matt-Deacalion/django | tests/template_tests/filter_tests/test_length_is.py | 360 | 3204 | from django.template.defaultfilters import length_is
from django.test import SimpleTestCase
from ..utils import setup
class LengthIsTests(SimpleTestCase):
@setup({'length_is01': '{% if some_list|length_is:"4" %}Four{% endif %}'})
def test_length_is01(self):
output = self.engine.render_to_string('length_is01', {'some_list': ['4', None, True, {}]})
self.assertEqual(output, 'Four')
@setup({'length_is02': '{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is02(self):
output = self.engine.render_to_string('length_is02', {'some_list': ['4', None, True, {}, 17]})
self.assertEqual(output, 'Not Four')
@setup({'length_is03': '{% if mystring|length_is:"4" %}Four{% endif %}'})
def test_length_is03(self):
output = self.engine.render_to_string('length_is03', {'mystring': 'word'})
self.assertEqual(output, 'Four')
@setup({'length_is04': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is04(self):
output = self.engine.render_to_string('length_is04', {'mystring': 'Python'})
self.assertEqual(output, 'Not Four')
@setup({'length_is05': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is05(self):
output = self.engine.render_to_string('length_is05', {'mystring': ''})
self.assertEqual(output, 'Not Four')
@setup({'length_is06': '{% with var|length as my_length %}{{ my_length }}{% endwith %}'})
def test_length_is06(self):
output = self.engine.render_to_string('length_is06', {'var': 'django'})
self.assertEqual(output, '6')
# Boolean return value from length_is should not be coerced to a string
@setup({'length_is07': '{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}'})
def test_length_is07(self):
output = self.engine.render_to_string('length_is07', {})
self.assertEqual(output, 'Length not 0')
@setup({'length_is08': '{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}'})
def test_length_is08(self):
output = self.engine.render_to_string('length_is08', {})
self.assertEqual(output, 'Length is 1')
# Invalid uses that should fail silently.
@setup({'length_is09': '{{ var|length_is:"fish" }}'})
def test_length_is09(self):
output = self.engine.render_to_string('length_is09', {'var': 'django'})
self.assertEqual(output, '')
@setup({'length_is10': '{{ int|length_is:"1" }}'})
def test_length_is10(self):
output = self.engine.render_to_string('length_is10', {'int': 7})
self.assertEqual(output, '')
@setup({'length_is11': '{{ none|length_is:"1" }}'})
def test_length_is11(self):
output = self.engine.render_to_string('length_is11', {'none': None})
self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
def test_empty_list(self):
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
def test_string(self):
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is('a', 10), False)
| bsd-3-clause |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 196 | 5440 | from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class NewConnectionError(ConnectTimeoutError, PoolError):
"Raised when we fail to establish a new connection. Usually ECONNREFUSED."
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class SubjectAltNameWarning(SecurityWarning):
"Warned when connecting to a host with a certificate missing a SAN."
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class SNIMissingWarning(HTTPWarning):
"Warned when making a HTTPS request without SNI available."
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
class ProxySchemeUnknown(AssertionError, ValueError):
"ProxyManager does not support the supplied scheme"
# TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
def __init__(self, scheme):
message = "Not supported proxy scheme %s" % scheme
super(ProxySchemeUnknown, self).__init__(message)
class HeaderParsingError(HTTPError):
"Raised by assert_header_parsing, but we convert it to a log.warning statement."
def __init__(self, defects, unparsed_data):
message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
super(HeaderParsingError, self).__init__(message)
| gpl-3.0 |
faustinoaq/chat | main.py | 2 | 5367 | """
Open Chat by @faustinoaq
This app uses the Web.py micro web framework
and SQLAlchemy ORM
"""
import web # Web.py framework
import json # Used for generate JSON documents
import time # Used for generate timestamps
from db import db # Get the database queries
from random import randrange # Used for generate random numbers
web.config.debug = False
# Routes and classes
urls = (
'/', 'Index',
'/data/(.*)', 'Data',
'/exit', 'Exit',
'/reset', 'Reset'
)
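# Hedged sketch (not part of the app): web.py dispatches each URL pattern above
# to the named class, so GET /data/recent-messages calls Data.GET(self,
# "recent-messages") and GET /exit calls Exit.GET. Against the built-in server
# (default port 8080) a quick manual check could be:
#
#     import requests                       # hypothetical client, not used here
#     print(requests.get("http://localhost:8080/data/report").text)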
app = web.application(urls, globals())
render = web.template.render('templates', base='base', globals={})
make = web.template.render('templates', globals={})
class Index:
def __init__(self):
self.maxUsers = 10 # LIMITED TO 30 USERS/COLORS
clients = db.select('user', what='count(*) as count')[0]
if clients.count >= self.maxUsers:
raise web.seeother('/data/warning')
try:
cookie = web.cookies()
if cookie.user == '' and cookie.color == '':
Color = self.color()
User = self.name()
web.setcookie('color', Color, 604800)
web.setcookie('user', User, 604800)
timestamp = time.strftime("%Y-%m-%d %I:%M:%S %p")
db.insert('user', user=User,
color=Color,
timestamp=timestamp)
else:
data = db.select('user', where='user="{0}"'.format(cookie.user))
if not data:
                    # Cookie refers to a user that is no longer in the DB;
                    # trigger the cookie reset in the except block below.
                    raise LookupError(cookie.user)
except BaseException as ex:
print ex
web.setcookie('user', '', 3600)
web.setcookie('color', '', 3600)
raise web.seeother('/')
def color(self):
stepRange = 50 # Max color = 255/stepRange * 6
r = randrange(start=100, stop=255, step=stepRange)
b = randrange(100, 255, stepRange)
g = randrange(100, 255, stepRange)
rgb = [
[255, 0, b],
[0, g, 255],
[r, 0, 255],
[r, 255, 0],
[0, 255, b],
[255, g, 0],
]
existColor = True
while existColor:
            # Pick one of the six candidate triples without overwriting the
            # palette itself, so a retry can draw a different combination.
            picked = rgb[randrange(0, 6, 1)]
            color = "rgb({0}, {1}, {2})".format(picked[0], picked[1], picked[2])
data = db.select('user', where='color="{0}"'.format(color))
if data:
existColor = True
else:
existColor = False
return color
def name(self):
existUser = True
while existUser:
user = 'User' + str(randrange(1, self.maxUsers*5, 1))
data = db.select('user', where='user="{0}"'.format(user))
if data:
existUser = True
else:
existUser = False
return user
def GET(self):
cookie = web.cookies()
if cookie.user and cookie.color:
return render.home(cookie.user, cookie.color)
raise web.seeother('/')
def POST(self):
i = web.input()
cookie = web.cookies()
timestamp = time.strftime("%Y-%m-%d %I:%M:%S %p")
db.insert('data', timestamp=timestamp,
content=i.content,
user=cookie.user)
class Data:
def hackIter(self, Iterbetter):
list = []
for Iter in Iterbetter:
list.append(Iter)
return list
def GET(self, data):
if data == "report":
clients = db.select('user', what='count(*) as count')[0]
messages = db.select('data', what='count(*) as count')[0]
report = {'clients': clients.count, 'messages': messages.count}
return "report = {0}".format(json.dumps(report, indent=4))
if data == "users":
users = self.hackIter(db.select('user'))
return "users = {0}".format(json.dumps(users, indent=4))
elif data == "last-message":
data = self.hackIter(db.select('data', order="id DESC", limit=1))
clients = self.hackIter(db.select('user'))
return make.message(data, clients)
elif data == "recent-messages":
data = self.hackIter(db.select('data', order="id DESC", limit=100))
clients = self.hackIter(db.select('user'))
return make.message(data, clients)
elif data == "all-messages":
data = self.hackIter(db.select('data', order="id DESC"))
clients = self.hackIter(db.select('user'))
return make.message(data, clients)
elif data == "warning":
return render.warning()
class Exit:
def GET(self):
cookie = web.cookies()
web.setcookie('user', '', 3600)
web.setcookie('color', '', 3600)
db.delete('user', where='user="{0}"'.format(cookie.user))
db.delete('data', where='user="{0}"'.format(cookie.user))
return render.bye()
class Reset:
def GET(self):
db.delete('user', where="id>0")
db.delete('data', where="id>0")
web.setcookie('user', '', 3600)
web.setcookie('color', '', 3600)
return render.bye()
if __name__ == '__main__':
app.run()
| mit |
felliott/osf.io | osf/migrations/0106_set_preprint_identifier_category.py | 16 | 1415 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-29 19:46
from __future__ import unicode_literals
from django.db import migrations, models
from django.contrib.contenttypes.models import ContentType
def set_preprint_identifier_catetory_to_legacy(apps, *args, **kwargs):
PreprintService = apps.get_model('osf', 'PreprintService')
Identifier = apps.get_model('osf', 'Identifier')
preprint_content_type = ContentType.objects.get_for_model(PreprintService)
Identifier.objects.filter(content_type_id=preprint_content_type.id, category='doi').update(category='legacy_doi')
def return_preprint_identifier_category_to_doi(apps, *args, **kwargs):
PreprintService = apps.get_model('osf', 'PreprintService')
Identifier = apps.get_model('osf', 'Identifier')
preprint_content_type = ContentType.objects.get_for_model(PreprintService)
Identifier.objects.filter(content_type_id=preprint_content_type.id, category='legacy_doi').update(category='doi')
class Migration(migrations.Migration):
dependencies = [
('osf', '0105_add_identifier_deleted_field'),
]
operations = [
migrations.AlterField(
model_name='identifier',
name='category',
field=models.CharField(max_length=20),
),
migrations.RunPython(set_preprint_identifier_catetory_to_legacy, return_preprint_identifier_category_to_doi)
]
| apache-2.0 |
SrNetoChan/Quantum-GIS | python/plugins/db_manager/db_plugins/postgis/sql_dictionary.py | 30 | 16054 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sql_dictionary.py
---------------------
Date : April 2012
Copyright : (C) 2012 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import map
__author__ = 'Giuseppe Sucameli'
__date__ = 'April 2012'
__copyright__ = '(C) 2012, Giuseppe Sucameli'
# keywords
keywords = [
# TODO get them from a reference page
"action", "add", "after", "all", "alter", "analyze", "and", "as", "asc",
"before", "begin", "between", "by", "cascade", "case", "cast", "check",
"collate", "column", "commit", "constraint", "create", "cross", "current_date",
"current_time", "current_timestamp", "default", "deferrable", "deferred",
"delete", "desc", "distinct", "drop", "each", "else", "end", "escape",
"except", "exists", "for", "foreign", "from", "full", "group", "having",
"ignore", "immediate", "in", "initially", "inner", "insert", "intersect",
"into", "is", "isnull", "join", "key", "left", "like", "limit", "match",
"natural", "no", "not", "notnull", "null", "of", "offset", "on", "or", "order",
"outer", "primary", "references", "release", "restrict", "right", "rollback",
"row", "savepoint", "select", "set", "table", "temporary", "then", "to",
"transaction", "trigger", "union", "unique", "update", "using", "values",
"view", "when", "where",
"absolute", "admin", "aggregate", "alias", "allocate", "analyse", "any", "are",
"array", "asensitive", "assertion", "asymmetric", "at", "atomic",
"authorization", "avg", "bigint", "binary", "bit", "bit_length", "blob",
"boolean", "both", "breadth", "call", "called", "cardinality", "cascaded",
"catalog", "ceil", "ceiling", "char", "character", "character_length",
"char_length", "class", "clob", "close", "coalesce", "collation", "collect",
"completion", "condition", "connect", "connection", "constraints",
"constructor", "continue", "convert", "corr", "corresponding", "count",
"covar_pop", "covar_samp", "cube", "cume_dist", "current",
"current_default_transform_group", "current_path", "current_role",
"current_transform_group_for_type", "current_user", "cursor", "cycle", "data",
"date", "day", "deallocate", "dec", "decimal", "declare", "dense_rank",
"depth", "deref", "describe", "descriptor", "destroy", "destructor",
"deterministic", "diagnostics", "dictionary", "disconnect", "do", "domain",
"double", "dynamic", "element", "end-exec", "equals", "every", "exception",
"exec", "execute", "exp", "external", "extract", "false", "fetch", "filter",
"first", "float", "floor", "found", "free", "freeze", "function", "fusion",
"general", "get", "global", "go", "goto", "grant", "grouping", "hold", "host",
"hour", "identity", "ilike", "indicator", "initialize", "inout", "input",
"insensitive", "int", "integer", "intersection", "interval", "isolation",
"iterate", "language", "large", "last", "lateral", "leading", "less", "level",
"ln", "local", "localtime", "localtimestamp", "locator", "lower", "map", "max",
"member", "merge", "method", "min", "minute", "mod", "modifies", "modify",
"module", "month", "multiset", "names", "national", "nchar", "nclob", "new",
"next", "none", "normalize", "nullif", "numeric", "object", "octet_length",
"off", "old", "only", "open", "operation", "option", "ordinality", "out",
"output", "over", "overlaps", "overlay", "pad", "parameter", "parameters",
"partial", "partition", "path", "percentile_cont", "percentile_disc",
"percent_rank", "placing", "position", "postfix", "power", "precision",
"prefix", "preorder", "prepare", "preserve", "prior", "privileges",
"procedure", "public", "range", "rank", "read", "reads", "real", "recursive",
"ref", "referencing", "regr_avgx", "regr_avgy", "regr_count", "regr_intercept",
"regr_r2", "regr_slope", "regr_sxx", "regr_sxy", "regr_syy", "relative",
"result", "return", "returning", "returns", "revoke", "role", "rollup",
"routine", "rows", "row_number", "schema", "scope", "scroll", "search",
"second", "section", "sensitive", "sequence", "session", "session_user",
"sets", "similar", "size", "smallint", "some", "space", "specific",
"specifictype", "sql", "sqlcode", "sqlerror", "sqlexception", "sqlstate",
"sqlwarning", "sqrt", "start", "state", "statement", "static", "stddev_pop",
"stddev_samp", "structure", "submultiset", "substring", "sum", "symmetric",
"system", "system_user", "tablesample", "terminate", "than", "time",
"timestamp", "timezone_hour", "timezone_minute", "trailing", "translate",
"translation", "treat", "trim", "true", "uescape", "under", "unknown",
"unnest", "upper", "usage", "user", "value", "varchar", "variable", "varying",
"var_pop", "var_samp", "verbose", "whenever", "width_bucket", "window", "with",
"within", "without", "work", "write", "xml", "xmlagg", "xmlattributes",
"xmlbinary", "xmlcomment", "xmlconcat", "xmlelement", "xmlforest",
"xmlnamespaces", "xmlparse", "xmlpi", "xmlroot", "xmlserialize", "year", "zone"
]
postgis_keywords = []
# functions
functions = [
"coalesce",
"nullif", "quote", "random",
"replace", "soundex"
]
operators = [
' AND ', ' OR ', '||', ' < ', ' <= ', ' > ', ' >= ', ' = ', ' <> ', ' IS ', ' IS NOT ', ' IN ', ' LIKE ', ' GLOB ', ' MATCH ', ' REGEXP '
]
math_functions = [
# SQL math functions
"Abs", "ACos", "ASin", "ATan", "Cos", "Cot", "Degrees", "Exp", "Floor", "Log", "Log2",
"Log10", "Pi", "Radians", "Round", "Sign", "Sin", "Sqrt", "StdDev_Pop", "StdDev_Samp", "Tan",
"Var_Pop", "Var_Samp"]
string_functions = ["Length", "Lower", "Upper", "Like", "Trim", "LTrim", "RTrim", "Replace", "Substr"]
aggregate_functions = [
"Max", "Min", "Avg", "Count", "Sum", "Group_Concat", "Total", "Var_Pop", "Var_Samp", "StdDev_Pop", "StdDev_Samp"
]
postgis_functions = [ # from http://www.postgis.org/docs/reference.html
# 7.1. PostgreSQL PostGIS Types
"*box2d", "*box3d", "*box3d_extent", "*geometry", "*geometry_dump", "*geography",
# 7.2. Management Functions
"*addgeometrycolumn", "*dropgeometrycolumn", "*dropgeometrytable", "*postgis_full_version",
"*postgis_geos_version", "*postgis_libxml_version", "*postgis_lib_build_date",
"*postgis_lib_version", "*postgis_proj_version", "*postgis_scripts_build_date",
"*postgis_scripts_installed", "*postgis_scripts_released", "*postgis_uses_stats", "*postgis_version",
"*populate_geometry_columns", "*probe_geometry_columns", "*updategeometrysrid",
# 7.3. Geometry Constructors
"*ST_bdpolyfromtext", "*ST_bdmpolyfromtext", "*ST_geogfromtext", "*ST_geographyfromtext",
"*ST_geogfromwkb", "*ST_geomcollfromtext", "*ST_geomfromewkb", "*ST_geomfromewkt",
"*ST_geometryfromtext", "*ST_geomfromgml", "*ST_geomfromkml", "*ST_gmltosql", "*ST_geomfromtext",
"*ST_geomfromwkb", "*ST_linefrommultipoint", "*ST_linefromtext", "*ST_linefromwkb",
"*ST_linestringfromwkb", "*ST_makebox2d", "*ST_makebox3d", "ST_MakeLine", "*ST_makeenvelope",
"ST_MakePolygon", "ST_MakePoint", "ST_MakePointM", "*ST_MLinefromtext", "*ST_mpointfromtext",
"*ST_mpolyfromtext", "ST_Point", "*ST_pointfromtext", "*ST_pointfromwkb", "ST_Polygon",
"*ST_polygonfromtext", "*ST_wkbtosql", "*ST_wkttosql",
# 7.4. Geometry Accessors
"GeometryType", "ST_Boundary", "*ST_coorddim", "ST_Dimension", "ST_EndPoint", "ST_Envelope",
"ST_ExteriorRing", "ST_GeometryN", "ST_GeometryType", "ST_InteriorRingN", "ST_isClosed",
"ST_isEmpty", "ST_isRing", "ST_isSimple", "ST_isValid", "ST_isValidReason", "ST_M", "ST_NDims",
"ST_NPoints", "ST_NRings", "ST_NumGeometries", "ST_NumInteriorrings", "ST_NumInteriorring",
"ST_NumPoints", "ST_PointN", "ST_Srid", "ST_StartPoint", "ST_Summary", "ST_X", "ST_Y", "ST_Z",
"*ST_zmflag",
# 7.5. Geometry Editors
"ST_AddPoint", "ST_Affine", "ST_Force2D", "*ST_Force3D", "*ST_Force3dZ", "*ST_Force3DM",
"*ST_Force_4d", "*ST_force_collection", "*ST_forcerhr", "*ST_linemerge", "*ST_collectionextract",
"ST_Multi", "*ST_removepoint", "*ST_reverse", "*ST_rotate", "*ST_rotatex", "*ST_rotatey",
"*ST_rotatez", "*ST_scale", "*ST_segmentize", "*ST_setpoint", "ST_SetSrid", "ST_SnapToGrid",
"ST_Transform", "ST_Translate", "*ST_transscale",
# 7.6. Geometry Outputs
"*ST_asbinary", "*ST_asewkb", "*ST_asewkt", "*ST_asgeojson", "*ST_asgml", "*ST_ashexewkb", "*ST_askml",
"*ST_assvg", "*ST_geohash", "ST_Astext",
# 7.7. Operators
# 7.8. Spatial Relationships and Measurements
"ST_Area", "ST_Azimuth", "ST_Centroid", "ST_ClosestPoint", "ST_Contains", "ST_ContainsProperly",
"ST_Covers", "ST_CoveredBy", "ST_Crosses", "*ST_linecrossingdirection", "ST_Cisjoint",
"ST_Distance", "*ST_hausdorffdistance", "*ST_maxdistance", "ST_Distance_Sphere",
"ST_Distance_Spheroid", "*ST_DFullyWithin", "ST_DWithin", "ST_Equals", "*ST_hasarc",
"ST_Intersects", "ST_Length", "*ST_Length2d", "*ST_length3d", "ST_Length_Spheroid",
"*ST_length2d_spheroid", "*ST_length3d_spheroid", "*ST_longestline", "*ST_orderingequals",
"ST_Overlaps", "*ST_perimeter", "*ST_perimeter2d", "*ST_perimeter3d", "ST_PointOnSurface",
"ST_Relate", "ST_ShortestLine", "ST_Touches", "ST_Within",
# 7.9. Geometry Processing Functions
"ST_Buffer", "ST_BuildArea", "ST_Collect", "ST_ConvexHull", "*ST_curvetoline", "ST_Difference",
"ST_Dump", "*ST_dumppoints", "*ST_dumprings", "ST_Intersection", "*ST_linetocurve", "*ST_memunion",
"*ST_minimumboundingcircle", "*ST_polygonize", "*ST_shift_longitude", "ST_Simplify",
"ST_SimplifyPreserveTopology", "ST_SymDifference", "ST_Union",
# 7.10. Linear Referencing
"ST_Line_Interpolate_Point", "ST_Line_Locate_Point", "ST_Line_Substring",
"*ST_locate_along_measure", "*ST_locate_between_measures", "*ST_locatebetweenelevations",
"*ST_addmeasure",
# 7.11. Long Transactions Support
"*addauth", "*checkauth", "*disablelongtransactions", "*enablelongtransactions", "*lockrow",
"*unlockrows",
# 7.12. Miscellaneous Functions
"*ST_accum", "*box2d", "*box3d", "*ST_estimated_extent", "*ST_expand", "ST_Extent", "*ST_extent3d",
"*find_srid", "*ST_mem_size", "*ST_point_inside_circle", "ST_XMax", "ST_XMin", "ST_YMax", "ST_YMin",
"ST_ZMax", "ST_ZMin",
# 7.13. Exceptional Functions
"*postgis_addbbox", "*postgis_dropbbox", "*postgis_hasbbox",
# Raster functions
"AddRasterConstraints", "DropRasterConstraints", "AddOverviewConstraints", "DropOverviewConstraints",
"PostGIS_GDAL_Version", "PostGIS_Raster_Lib_Build_Date", "PostGIS_Raster_Lib_Version", "ST_GDALDrivers",
"UpdateRasterSRID", "ST_CreateOverview", "ST_AddBand", "ST_AsRaster", "ST_Band", "ST_MakeEmptyCoverage",
"ST_MakeEmptyRaster", "ST_Tile", "ST_Retile", "ST_FromGDALRaster", "ST_GeoReference", "ST_Height",
"ST_IsEmpty", "ST_MemSize", "ST_MetaData", "ST_NumBands", "ST_PixelHeight", "ST_PixelWidth", "ST_ScaleX",
"ST_ScaleY", "ST_RasterToWorldCoord", "ST_RasterToWorldCoordX", "ST_RasterToWorldCoordY", "ST_Rotation",
"ST_SkewX", "ST_SkewY", "ST_SRID", "ST_Summary", "ST_UpperLeftX", "ST_UpperLeftY", "ST_Width",
"ST_WorldToRasterCoord", "ST_WorldToRasterCoordX", "ST_WorldToRasterCoordY", "ST_BandMetaData",
"ST_BandNoDataValue", "ST_BandIsNoData", "ST_BandPath", "ST_BandFileSize", "ST_BandFileTimestamp",
"ST_BandPixelType", "ST_MinPossibleValue", "ST_HasNoBand", "ST_PixelAsPolygon", "ST_PixelAsPolygons",
"ST_PixelAsPoint", "ST_PixelAsPoints", "ST_PixelAsCentroid", "ST_PixelAsCentroids", "ST_Value",
"ST_NearestValue", "ST_Neighborhood", "ST_SetValue", "ST_SetValues", "ST_DumpValues", "ST_PixelOfValue",
"ST_SetGeoReference", "ST_SetRotation", "ST_SetScale", "ST_SetSkew", "ST_SetSRID", "ST_SetUpperLeft",
"ST_Resample", "ST_Rescale", "ST_Reskew", "ST_SnapToGrid", "ST_Resize", "ST_Transform",
"ST_SetBandNoDataValue", "ST_SetBandIsNoData", "ST_SetBandPath", "ST_SetBandIndex", "ST_Count",
"ST_CountAgg", "ST_Histogram", "ST_Quantile", "ST_SummaryStats", "ST_SummaryStatsAgg", "ST_ValueCount",
"ST_RastFromWKB", "ST_RastFromHexWKB", "ST_AsBinary", "ST_AsWKB", "ST_AsHexWKB", "ST_AsGDALRaster",
"ST_AsJPEG", "ST_AsPNG", "ST_AsTIFF", "ST_Clip", "ST_ColorMap", "ST_Grayscale", "ST_Intersection",
"ST_MapAlgebra", "ST_MapAlgebraExpr", "ST_MapAlgebraFct", "ST_MapAlgebraFctNgb", "ST_Reclass", "ST_Union",
"ST_Distinct4ma", "ST_InvDistWeight4ma", "ST_Max4ma", "ST_Mean4ma", "ST_Min4ma", "ST_MinDist4ma",
"ST_Range4ma", "ST_StdDev4ma", "ST_Sum4ma", "ST_Aspect", "ST_HillShade", "ST_Roughness", "ST_Slope",
"ST_TPI", "ST_TRI",
]
# constants
constants = ["null", "false", "true"]
postgis_constants = []
def getSqlDictionary(spatial=True):
def strip_star(s):
if s[0] == '*':
return s.lower()[1:]
else:
return s.lower()
k, c, f = list(keywords), list(constants), list(functions)
if spatial:
k += postgis_keywords
f += postgis_functions
c += postgis_constants
return {'keyword': list(map(strip_star, k)), 'constant': list(map(strip_star, c)), 'function': list(map(strip_star, f))}
def getQueryBuilderDictionary():
# concat functions
def ff(l):
return [s for s in l if s[0] != '*']
def add_paren(l):
return [s + "(" for s in l]
foo = sorted(add_paren(ff(list(set.union(set(functions), set(postgis_functions))))))
m = sorted(add_paren(ff(math_functions)))
agg = sorted(add_paren(ff(aggregate_functions)))
op = ff(operators)
s = sorted(add_paren(ff(string_functions)))
return {'function': foo, 'math': m, 'aggregate': agg, 'operator': op, 'string': s}
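# Hedged usage sketch: getSqlDictionary returns lower-cased names (with any
# leading '*' stripped) for the SQL editor, while getQueryBuilderDictionary
# keeps the original casing and appends '(' to function names:
#
#     d = getSqlDictionary(spatial=True)
#     sorted(d.keys())               # ['constant', 'function', 'keyword']
#     'st_buffer' in d['function']   # True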
| gpl-2.0 |
mmbtba/odoo | addons/website_membership/models/membership.py | 221 | 1642 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class membership_membership_line(osv.Model):
_inherit = 'membership.membership_line'
def get_published_companies(self, cr, uid, ids, limit=None, context=None):
if not ids:
return []
limit_clause = '' if limit is None else ' LIMIT %d' % limit
cr.execute('SELECT DISTINCT p.id \
FROM res_partner p INNER JOIN membership_membership_line m \
ON p.id = m.partner \
WHERE website_published AND is_company AND m.id IN %s ' + limit_clause, (tuple(ids),))
return [partner_id[0] for partner_id in cr.fetchall()]
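    # Hedged usage sketch (assumed caller, old-API style as in this module):
    #
    #     line_obj = self.pool.get('membership.membership_line')
    #     partner_ids = line_obj.get_published_companies(cr, uid, line_ids, limit=12)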
| agpl-3.0 |
bdang2012/taiga-back-casting | taiga/base/api/views.py | 1 | 16558 | # Copyright (C) 2014-2015 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2015 Jesús Espino <[email protected]>
# Copyright (C) 2014-2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This code is partially taken from django-rest-framework:
# Copyright (c) 2011-2014, Tom Christie
import json
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.http.response import HttpResponseBase
from django.views.decorators.csrf import csrf_exempt
from django.views.defaults import server_error
from django.views.generic import View
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from .request import Request
from .settings import api_settings
from .utils import formatting
from taiga.base import status
from taiga.base import exceptions
from taiga.base.response import Response
from taiga.base.response import Ok
from taiga.base.response import NotFound
from taiga.base.response import Forbidden
from taiga.base.utils.iterators import as_tuple
def get_view_name(view_cls, suffix=None):
"""
Given a view class, return a textual name to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_NAME_FUNCTION` setting.
"""
name = view_cls.__name__
name = formatting.remove_trailing_string(name, 'View')
name = formatting.remove_trailing_string(name, 'ViewSet')
name = formatting.camelcase_to_spaces(name)
if suffix:
name += ' ' + suffix
return name
def get_view_description(view_cls, html=False):
"""
Given a view class, return a textual description to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_DESCRIPTION_FUNCTION` setting.
"""
description = view_cls.__doc__ or ''
description = formatting.dedent(smart_text(description))
if html:
return formatting.markup_description(description)
return description
def exception_handler(exc):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's builtin `Http404` and `PermissionDenied` exceptions.
    For any other (unhandled) exception this returns `None`, which will cause
    a 500 error to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait
return Response({'detail': exc.detail},
status=exc.status_code,
headers=headers)
elif isinstance(exc, Http404):
return NotFound({'detail': _('Not found')})
elif isinstance(exc, PermissionDenied):
return Forbidden({'detail': _('Permission denied')})
# Note: Unhandled exceptions will raise a 500 error.
return None
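# Hedged illustration of the mapping above (comments only, nothing executed):
#
#     exception_handler(Http404())           # -> NotFound({'detail': 'Not found'})
#     exception_handler(PermissionDenied())  # -> Forbidden({'detail': 'Permission denied'})
#     exception_handler(RuntimeError())      # -> None; the caller re-raises -> HTTP 500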
class APIView(View):
# The following policies may be set at either globally, or per-view.
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
parser_classes = api_settings.DEFAULT_PARSER_CLASSES
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES
throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES
permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES
content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS
    # Allow dependency injection of other settings to make testing easier.
settings = api_settings
@classmethod
def as_view(cls, **initkwargs):
"""
Store the original class on the view function.
This allows us to discover information about the view when we do URL
reverse lookups. Used for breadcrumb generation.
"""
view = super(APIView, cls).as_view(**initkwargs)
view.cls = cls
return view
@property
def allowed_methods(self):
"""
Wrap Django's private `_allowed_methods` interface in a public property.
"""
return self._allowed_methods()
@property
def default_response_headers(self):
headers = {
'Allow': ', '.join(self.allowed_methods),
}
if len(self.renderer_classes) > 1:
headers['Vary'] = 'Accept'
return headers
def http_method_not_allowed(self, request, *args, **kwargs):
"""
If `request.method` does not correspond to a handler method,
determine what kind of exception to raise.
"""
raise exceptions.MethodNotAllowed(request.method)
def permission_denied(self, request):
"""
If request is not permitted, determine what kind of exception to raise.
"""
if not request.successful_authenticator:
raise exceptions.NotAuthenticated()
raise exceptions.PermissionDenied()
def throttled(self, request, wait):
"""
If request is throttled, determine what kind of exception to raise.
"""
raise exceptions.Throttled(wait)
def get_authenticate_header(self, request):
"""
If a request is unauthenticated, determine the WWW-Authenticate
header to use for 401 responses, if any.
"""
authenticators = self.get_authenticators()
if authenticators:
return authenticators[0].authenticate_header(request)
def get_parser_context(self, http_request):
"""
Returns a dict that is passed through to Parser.parse(),
as the `parser_context` keyword argument.
"""
# Note: Additionally `request` and `encoding` will also be added
# to the context by the Request object.
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {})
}
def get_renderer_context(self):
"""
Returns a dict that is passed through to Renderer.render(),
as the `renderer_context` keyword argument.
"""
# Note: Additionally 'response' will also be added to the context,
# by the Response object.
return {
'view': self,
'args': getattr(self, 'args', ()),
'kwargs': getattr(self, 'kwargs', {}),
'request': getattr(self, 'request', None)
}
def get_view_name(self):
"""
Return the view name, as used in OPTIONS responses and in the
browsable API.
"""
func = self.settings.VIEW_NAME_FUNCTION
return func(self.__class__, getattr(self, 'suffix', None))
def get_view_description(self, html=False):
"""
Return some descriptive text for the view, as used in OPTIONS responses
and in the browsable API.
"""
func = self.settings.VIEW_DESCRIPTION_FUNCTION
return func(self.__class__, html)
# API policy instantiation methods
def get_format_suffix(self, **kwargs):
"""
Determine if the request includes a '.json' style format suffix
"""
if self.settings.FORMAT_SUFFIX_KWARG:
return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)
def get_renderers(self):
"""
Instantiates and returns the list of renderers that this view can use.
"""
return [renderer() for renderer in self.renderer_classes]
def get_parsers(self):
"""
Instantiates and returns the list of parsers that this view can use.
"""
return [parser() for parser in self.parser_classes]
def get_authenticators(self):
"""
Instantiates and returns the list of authenticators that this view can use.
"""
return [auth() for auth in self.authentication_classes]
@as_tuple
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
for permcls in self.permission_classes:
instance = permcls(request=self.request,
view=self)
yield instance
def get_throttles(self):
"""
Instantiates and returns the list of throttles that this view uses.
"""
return [throttle() for throttle in self.throttle_classes]
def get_content_negotiator(self):
"""
Instantiate and return the content negotiation class to use.
"""
if not getattr(self, '_negotiator', None):
self._negotiator = self.content_negotiation_class()
return self._negotiator
# API policy implementation methods
def perform_content_negotiation(self, request, force=False):
"""
        Determine which renderer and media type to use to render the response.
"""
renderers = self.get_renderers()
conneg = self.get_content_negotiator()
try:
return conneg.select_renderer(request, renderers, self.format_kwarg)
except Exception:
if force:
return (renderers[0], renderers[0].media_type)
raise
def perform_authentication(self, request):
"""
Perform authentication on the incoming request.
Note that if you override this and simply 'pass', then authentication
will instead be performed lazily, the first time either
`request.user` or `request.auth` is accessed.
"""
request.user
def check_permissions(self, request, action:str=None, obj=None):
if action is None:
self.permission_denied(request)
for permission in self.get_permissions():
if not permission.check_permissions(action=action, obj=obj):
self.permission_denied(request)
def check_object_permissions(self, request, obj):
self.check_permissions(request, None, obj)
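    # Hedged sketch (hypothetical class, not part of taiga): anything listed in
    # permission_classes only needs the constructor and check_permissions()
    # signatures that get_permissions()/check_permissions() above rely on:
    #
    #     class IsAuthenticatedDemo(object):
    #         def __init__(self, request, view):
    #             self.request = request
    #             self.view = view
    #
    #         def check_permissions(self, action=None, obj=None):
    #             return self.request.user.is_authenticated()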
def check_throttles(self, request):
"""
Check if request should be throttled.
Raises an appropriate exception if the request is throttled.
"""
for throttle in self.get_throttles():
if not throttle.allow_request(request, self):
self.throttled(request, throttle.wait())
# Dispatch methods
def initialize_request(self, request, *args, **kwargs):
"""
Returns the initial request object.
"""
parser_context = self.get_parser_context(request)
return Request(request,
parsers=self.get_parsers(),
authenticators=self.get_authenticators(),
negotiator=self.get_content_negotiator(),
parser_context=parser_context)
def initial(self, request, *args, **kwargs):
"""
Runs anything that needs to occur prior to calling the method handler.
"""
self.format_kwarg = self.get_format_suffix(**kwargs)
# Ensure that the incoming request is permitted
self.perform_authentication(request)
self.check_throttles(request)
# Perform content negotiation and store the accepted info on the request
neg = self.perform_content_negotiation(request)
request.accepted_renderer, request.accepted_media_type = neg
def finalize_response(self, request, response, *args, **kwargs):
"""
Returns the final response object.
"""
# Make the error obvious if a proper response is not returned
assert isinstance(response, HttpResponseBase), ('Expected a `Response`, `HttpResponse` or '
'`HttpStreamingResponse` to be returned from the view, '
'but received a `%s`' % type(response))
if isinstance(response, Response):
if not getattr(request, 'accepted_renderer', None):
neg = self.perform_content_negotiation(request, force=True)
request.accepted_renderer, request.accepted_media_type = neg
response.accepted_renderer = request.accepted_renderer
response.accepted_media_type = request.accepted_media_type
response.renderer_context = self.get_renderer_context()
for key, value in self.headers.items():
response[key] = value
return response
def handle_exception(self, exc):
"""
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
if isinstance(exc, (exceptions.NotAuthenticated,
exceptions.AuthenticationFailed)):
# WWW-Authenticate header for 401 responses, else coerce to 403
auth_header = self.get_authenticate_header(self.request)
if auth_header:
exc.auth_header = auth_header
else:
exc.status_code = status.HTTP_403_FORBIDDEN
response = self.settings.EXCEPTION_HANDLER(exc)
if response is None:
raise
response.exception = True
return response
# Note: session based authentication is explicitly CSRF validated,
# all other authentication is CSRF exempt.
@csrf_exempt
def dispatch(self, request, *args, **kwargs):
"""
`.dispatch()` is pretty much the same as Django's regular dispatch,
but with extra hooks for startup, finalize, and exception handling.
"""
self.args = args
self.kwargs = kwargs
request = self.initialize_request(request, *args, **kwargs)
self.request = request
self.headers = self.default_response_headers
try:
self.initial(request, *args, **kwargs)
# Get the appropriate handler method
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
response = handler(request, *args, **kwargs)
except Exception as exc:
response = self.handle_exception(exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
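    # Hedged sketch (assumed names): a concrete endpoint implements the verb
    # methods that dispatch() resolves from request.method and is wired up via
    # as_view(), e.g.
    #
    #     class PingView(APIView):
    #         def get(self, request, *args, **kwargs):
    #             return Ok({"pong": True})
    #
    #     # urls.py: url(r'^ping$', PingView.as_view())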
def options(self, request, *args, **kwargs):
"""
Handler method for HTTP 'OPTIONS' request.
We may as well implement this as Django will otherwise provide
a less useful default implementation.
"""
return Ok(self.metadata(request))
def metadata(self, request):
"""
Return a dictionary of metadata about the view.
Used to return responses for OPTIONS requests.
"""
# By default we can't provide any form-like information, however the
# generic views override this implementation and add additional
# information for POST and PUT methods, based on the serializer.
ret = SortedDict()
ret['name'] = self.get_view_name()
ret['description'] = self.get_view_description()
ret['renders'] = [renderer.media_type for renderer in self.renderer_classes]
ret['parses'] = [parser.media_type for parser in self.parser_classes]
return ret
def api_server_error(request, *args, **kwargs):
if settings.DEBUG is False and request.META.get('CONTENT_TYPE', None) == "application/json":
return HttpResponse(json.dumps({"error": _("Server application error")}),
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return server_error(request, *args, **kwargs)
| agpl-3.0 |
glennw/servo | tests/wpt/web-platform-tests/old-tests/webdriver/windows/window_manipulation.py | 142 | 1556 | # -*- mode: python; fill-column: 100; comment-column: 100; -*-
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.common import exceptions
class WindowingTest(base_test.WebDriverBaseTest):
def test_maximize(self):
#self.driver.get(self.webserver.where_is("windows/res/win1.html"))
self.driver.maximize_window()
def test_window_size_manipulation(self):
#self.driver.get(self.webserver.where_is("windows/res/win1.html"))
self.driver.set_window_size(400, 400)
window_size = self.driver.get_window_size()
self.assertTrue("width" in window_size)
self.assertTrue("height" in window_size)
self.assertEquals({"width": 400, "height":400}, window_size)
"""
todo: make that work
see: https://w3c.github.io/webdriver/webdriver-spec.html#setwindowsize
result = self.driver.set_window_size(100, 100)
self.assertTrue("status" in result)
self.assertEquals(result["status"], 500)
"""
def test_window_position_manipulation(self):
#self.driver.get(self.webserver.where_is("windows/res/win1.html"))
self.driver.set_window_position(400, 400)
window_position = self.driver.get_window_position()
self.assertTrue("x" in window_position)
self.assertTrue("y" in window_position)
self.assertEquals({"x": 400, "y": 400}, window_position)
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
awanke/bokeh | examples/glyphs/calendars.py | 45 | 3431 | from __future__ import absolute_import, print_function
from calendar import Calendar, day_abbr as day_abbrs, month_name as month_names
from bokeh.models import GridPlot, Plot, ColumnDataSource, FactorRange, CategoricalAxis, HoverTool
from bokeh.models.glyphs import Text, Rect
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.sampledata.us_holidays import us_holidays
def make_calendar(year, month, firstweekday="Mon"):
firstweekday = list(day_abbrs).index(firstweekday)
calendar = Calendar(firstweekday=firstweekday)
month_days = [ None if not day else str(day) for day in calendar.itermonthdays(year, month) ]
month_weeks = len(month_days)//7
workday = "linen"
weekend = "lightsteelblue"
def weekday(date):
return (date.weekday() - firstweekday) % 7
def pick_weekdays(days):
return [ days[i % 7] for i in range(firstweekday, firstweekday+7) ]
day_names = pick_weekdays(day_abbrs)
week_days = pick_weekdays([workday]*5 + [weekend]*2)
source = ColumnDataSource(data=dict(
days = list(day_names)*month_weeks,
weeks = sum([ [str(week)]*7 for week in range(month_weeks) ], []),
month_days = month_days,
day_backgrounds = sum([week_days]*month_weeks, []),
))
holidays = [ (date, summary.replace("(US-OPM)", "").strip()) for (date, summary) in us_holidays
if date.year == year and date.month == month and "(US-OPM)" in summary ]
holidays_source = ColumnDataSource(data=dict(
holidays_days = [ day_names[weekday(date)] for date, _ in holidays ],
holidays_weeks = [ str((weekday(date.replace(day=1)) + date.day) // 7) for date, _ in holidays ],
month_holidays = [ summary for _, summary in holidays ],
))
xdr = FactorRange(factors=list(day_names))
ydr = FactorRange(factors=list(reversed([ str(week) for week in range(month_weeks) ])))
plot = Plot(title=month_names[month], x_range=xdr, y_range=ydr, plot_width=300, plot_height=300, outline_line_color=None)
plot.title_text_color = "darkolivegreen"
rect = Rect(x="days", y="weeks", width=0.9, height=0.9, fill_color="day_backgrounds", line_color="silver")
plot.add_glyph(source, rect)
rect = Rect(x="holidays_days", y="holidays_weeks", width=0.9, height=0.9, fill_color="pink", line_color="indianred")
rect_renderer = plot.add_glyph(holidays_source, rect)
text = Text(x="days", y="weeks", text="month_days", text_align="center", text_baseline="middle")
plot.add_glyph(source, text)
xaxis = CategoricalAxis()
xaxis.major_label_text_font_size = "8pt"
xaxis.major_label_standoff = 0
xaxis.major_tick_line_color = None
xaxis.axis_line_color = None
plot.add_layout(xaxis, 'above')
hover_tool = HoverTool(plot=plot, renderers=[rect_renderer], tooltips=[("Holiday", "@month_holidays")])
plot.tools.append(hover_tool)
return plot
months = [ [ make_calendar(2014, 3*i + j + 1) for j in range(3) ] for i in range(4) ]
grid = GridPlot(title="Calendar 2014", toolbar_location=None, children=months)
doc = Document()
doc.add(grid)
if __name__ == "__main__":
filename = "calendars.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Calendar 2014"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
wdzhou/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PoldiDataAnalysis.py | 3 | 13806 | # pylint: disable=no-init,invalid-name,attribute-defined-outside-init,too-many-instance-attributes
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
class PoldiDataAnalysis(PythonAlgorithm):
"""
This workflow algorithm uses all of the POLDI specific algorithms to perform a complete data analysis,
starting from the correlation method and preliminary 1D-fits, proceeding with either one or two passses
of 2D-fitting.
All resulting workspaces are grouped together at the end so that they are all in one place.
"""
def category(self):
return "SINQ\\Poldi"
def name(self):
return "PoldiDataAnalysis"
def summary(self):
return "Run all necessary steps for a complete analysis of POLDI data."
def checkGroups(self):
return False
def PyInit(self):
self._allowedFunctions = ["Gaussian", "Lorentzian", "PseudoVoigt", "Voigt"]
self._globalParameters = {
'Gaussian': [],
'Lorentzian': [],
'PseudoVoigt': ['Mixing'],
'Voigt': ['LorentzFWHM']
}
self.declareProperty(WorkspaceProperty(name="InputWorkspace", defaultValue="", direction=Direction.Input),
doc='MatrixWorkspace with 2D POLDI data and valid POLDI instrument.')
self.declareProperty("MaximumPeakNumber", 10, direction=Direction.Input,
doc='Maximum number of peaks to process in the analysis.')
self.declareProperty("MinimumPeakSeparation", 10, direction=Direction.Input,
doc='Minimum number of points between neighboring peaks.')
self.declareProperty("MinimumPeakHeight", 0.0, direction=Direction.Input,
                             doc=('Minimum height of peaks. If it is left at 0, the minimum peak height is calculated '
'from background noise.'))
self.declareProperty("MaximumRelativeFwhm", 0.02, direction=Direction.Input,
doc=('Peaks with a relative FWHM larger than this are removed during the 1D fit.'))
self.declareProperty("ScatteringContributions", "1", direction=Direction.Input,
doc=('If there is more than one compound, you may supply estimates of their scattering '
'contributions, which sometimes improves indexing.'))
self.declareProperty(WorkspaceProperty("ExpectedPeaks", defaultValue="", direction=Direction.Input),
doc='TableWorkspace or WorkspaceGroup with expected peaks used for indexing.')
self.declareProperty("RemoveUnindexedPeaksFor2DFit", defaultValue=False, direction=Direction.Input,
doc='Discard unindexed peaks for 2D fit, this is always the case if PawleyFit is active.')
allowedProfileFunctions = StringListValidator(self._allowedFunctions)
self.declareProperty("ProfileFunction", "Gaussian", validator=allowedProfileFunctions,
direction=Direction.Input)
self.declareProperty("TieProfileParameters", True, direction=Direction.Input,
doc=('If this option is activated, certain parameters are kept the same for all peaks. '
'An example is the mixing parameter of the PseudoVoigt function.'))
self.declareProperty("PawleyFit", False, direction=Direction.Input,
doc='Should the 2D-fit determine lattice parameters?')
self.declareProperty("MultipleRuns", False, direction=Direction.Input,
                             doc=('If this is activated, peaks are searched again in the '
'residuals and the 1D- and 2D-fit is repeated '
'with these data.'))
self.declareProperty("PlotResult", True, direction=Direction.Input,
doc=('If this is activated, plot the sum of residuals and calculated spectrum together '
'with the theoretical spectrum and the residuals.'))
self.declareProperty("OutputIntegratedIntensities", False, direction=Direction.Input,
doc=("If this option is checked the peak intensities of the 2D-fit will be integrated, "
"otherwise they will be the maximum intensity."))
self.declareProperty('OutputRawFitParameters', False, direction=Direction.Input,
doc=('Activating this option produces an output workspace which contains the raw '
'fit parameters.'))
self.declareProperty(WorkspaceProperty(name="OutputWorkspace", defaultValue="", direction=Direction.Output),
doc='WorkspaceGroup with result data from all processing steps.')
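    # Hedged usage sketch: after AlgorithmFactory.subscribe() at the bottom of
    # this file, the workflow is normally driven through simpleapi with the
    # properties declared above, e.g.
    #
    #     PoldiDataAnalysis(InputWorkspace=data_2d,
    #                       ExpectedPeaks=expected_peaks,
    #                       MaximumPeakNumber=10,
    #                       OutputWorkspace='poldi_analysis')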
def PyExec(self):
self.outputWorkspaces = []
self.baseName = self.getProperty("InputWorkspace").valueAsStr
self.inputWorkspace = self.getProperty("InputWorkspace").value
self.expectedPeaks = self.getProperty("ExpectedPeaks").value
self.profileFunction = self.getProperty("ProfileFunction").value
self.useGlobalParameters = self.getProperty("TieProfileParameters").value
self.maximumRelativeFwhm = self.getProperty("MaximumRelativeFwhm").value
self.outputIntegratedIntensities = self.getProperty("OutputIntegratedIntensities").value
self.globalParameters = ''
if self.useGlobalParameters:
self.globalParameters = ','.join(self._globalParameters[self.profileFunction])
if not self.workspaceHasCounts(self.inputWorkspace):
raise RuntimeError("Aborting analysis since workspace " + self.baseName + " does not contain any counts.")
correlationSpectrum = self.runCorrelation()
self.outputWorkspaces.append(correlationSpectrum)
self.numberOfExecutions = 0
self.outputWorkspaces += self.runMainAnalysis(correlationSpectrum)
outputWs = GroupWorkspaces(self.outputWorkspaces[0])
for ws in self.outputWorkspaces[1:]:
outputWs.add(ws.name())
RenameWorkspace(outputWs, self.getProperty("OutputWorkspace").valueAsStr)
self.setProperty("OutputWorkspace", outputWs)
def workspaceHasCounts(self, workspace):
integrated = Integration(workspace)
summed = SumSpectra(integrated)
counts = summed.readY(0)[0]
DeleteWorkspace(integrated)
DeleteWorkspace(summed)
return counts > 0
def runCorrelation(self):
correlationName = self.baseName + "_correlation"
PoldiAutoCorrelation(self.inputWorkspace, OutputWorkspace=correlationName)
return AnalysisDataService.retrieve(correlationName)
def runMainAnalysis(self, correlationSpectrum):
self.numberOfExecutions += 1
outputWorkspaces = []
rawPeaks = self.runPeakSearch(correlationSpectrum)
outputWorkspaces.append(rawPeaks)
refinedPeaks, fitPlots = self.runPeakFit1D(correlationSpectrum, rawPeaks)
outputWorkspaces.append(refinedPeaks)
outputWorkspaces.append(fitPlots)
indexedPeaks, unindexedPeaks = self.runIndex(refinedPeaks)
outputWorkspaces.append(indexedPeaks)
pawleyFit = self.getProperty('PawleyFit').value
if pawleyFit:
outputWorkspaces.append(unindexedPeaks)
fitPeaks2DResult = self.runPeakFit2D(indexedPeaks)
outputWorkspaces += fitPeaks2DResult
spectrum2D = fitPeaks2DResult[0]
spectrum1D = fitPeaks2DResult[1]
residuals = self.runResidualAnalysis(spectrum2D)
outputWorkspaces.append(residuals)
totalName = self.baseName + "_sum"
Plus(LHSWorkspace=spectrum1D, RHSWorkspace=residuals, OutputWorkspace=totalName)
total = AnalysisDataService.retrieve(totalName)
outputWorkspaces.append(total)
if self.numberOfExecutions == 1:
self._plotResult(total, spectrum1D, residuals)
runTwice = self.getProperty('MultipleRuns').value
if runTwice and self.numberOfExecutions == 1:
return self.runMainAnalysis(total)
else:
return outputWorkspaces
def runPeakSearch(self, correlationWorkspace):
peaksName = self.baseName + "_peaks_raw"
PoldiPeakSearch(InputWorkspace=correlationWorkspace,
MaximumPeakNumber=self.getProperty('MaximumPeakNumber').value,
MinimumPeakSeparation=self.getProperty('MinimumPeakSeparation').value,
MinimumPeakHeight=self.getProperty('MinimumPeakHeight').value,
OutputWorkspace=peaksName)
return AnalysisDataService.retrieve(peaksName)
def runPeakFit1D(self, correlationWorkspace, rawPeaks):
refinedPeaksName = self.baseName + "_peaks_refined_1d"
plotNames = self.baseName + "_fit_plots"
PoldiFitPeaks1D(InputWorkspace=correlationWorkspace,
PoldiPeakTable=rawPeaks,
FwhmMultiples=3.0,
MaximumRelativeFwhm=self.maximumRelativeFwhm,
PeakFunction=self.profileFunction,
OutputWorkspace=refinedPeaksName,
FitPlotsWorkspace=plotNames)
return AnalysisDataService.retrieve(refinedPeaksName), AnalysisDataService.retrieve(plotNames)
def runIndex(self, peaks):
indexedPeaksName = self.baseName + "_indexed"
PoldiIndexKnownCompounds(InputWorkspace=peaks,
CompoundWorkspaces=self.expectedPeaks,
ScatteringContributions=self.getProperty("ScatteringContributions").value,
OutputWorkspace=indexedPeaksName)
indexedPeaks = AnalysisDataService.retrieve(indexedPeaksName)
# Remove unindexed peaks from group for pawley fit
unindexedPeaks = indexedPeaks.getItem(indexedPeaks.getNumberOfEntries() - 1)
pawleyFit = self.getProperty('PawleyFit').value
removeUnindexed = self.getProperty('RemoveUnindexedPeaksFor2DFit').value
if removeUnindexed or pawleyFit:
indexedPeaks.remove(unindexedPeaks.name())
self._removeEmptyTablesFromGroup(indexedPeaks)
return indexedPeaks, unindexedPeaks
def runPeakFit2D(self, peaks):
spectrum2DName = self.baseName + "_fit2d"
spectrum1DName = self.baseName + "_fit1d"
refinedPeaksName = self.baseName + "_peaks_refined_2d"
refinedCellName = self.baseName + "_cell_refined"
pawleyFit = self.getProperty('PawleyFit').value
rawFitParametersWorkspaceName = ''
outputRawFitParameters = self.getProperty('OutputRawFitParameters').value
if outputRawFitParameters:
rawFitParametersWorkspaceName = self.baseName + "_raw_fit_parameters"
PoldiFitPeaks2D(InputWorkspace=self.inputWorkspace,
PoldiPeakWorkspace=peaks,
PeakProfileFunction=self.profileFunction,
GlobalParameters=self.globalParameters,
PawleyFit=pawleyFit,
MaximumIterations=100,
OutputWorkspace=spectrum2DName,
Calculated1DSpectrum=spectrum1DName,
RefinedPoldiPeakWorkspace=refinedPeaksName,
OutputIntegratedIntensities=self.outputIntegratedIntensities,
RefinedCellParameters=refinedCellName,
RawFitParameters=rawFitParametersWorkspaceName)
workspaces = [AnalysisDataService.retrieve(spectrum2DName),
AnalysisDataService.retrieve(spectrum1DName),
AnalysisDataService.retrieve(refinedPeaksName)]
if AnalysisDataService.doesExist(refinedCellName):
workspaces.append(AnalysisDataService.retrieve(refinedCellName))
if AnalysisDataService.doesExist(rawFitParametersWorkspaceName):
workspaces.append(AnalysisDataService.retrieve(rawFitParametersWorkspaceName))
return workspaces
def runResidualAnalysis(self, calculated2DSpectrum):
residualName = self.baseName + "_residuals"
PoldiAnalyseResiduals(MeasuredCountData=self.inputWorkspace,
FittedCountData=calculated2DSpectrum,
MaxIterations=5,
OutputWorkspace=residualName)
return AnalysisDataService.retrieve(residualName)
def _removeEmptyTablesFromGroup(self, groupWorkspace):
deleteNames = []
for i in range(groupWorkspace.getNumberOfEntries()):
ws = groupWorkspace.getItem(i)
if ws.rowCount() == 0:
deleteNames.append(ws.name())
for name in deleteNames:
DeleteWorkspace(name)
def _plotResult(self, total, spectrum1D, residuals):
plotResults = self.getProperty('PlotResult').value
if plotResults:
from IndirectImport import import_mantidplot
plot = import_mantidplot()
plotWindow = plot.plotSpectrum(total, 0, type=1)
plotWindow = plot.plotSpectrum(spectrum1D, 0, type=0, window=plotWindow)
plotWindow = plot.plotSpectrum(residuals, 0, type=0, window=plotWindow)
plotWindow.activeLayer().setTitle('Fit result for ' + self.baseName)
plotWindow.activeLayer().removeLegend()
AlgorithmFactory.subscribe(PoldiDataAnalysis())
| gpl-3.0 |
appliedx/edx-platform | lms/djangoapps/verify_student/tests/test_models.py | 33 | 33741 | # -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import ddt
import json
import requests.exceptions
import pytz
from django.conf import settings
from django.db.utils import IntegrityError
from django.test import TestCase
from mock import patch
from nose.tools import assert_is_none, assert_equals, assert_raises, assert_true, assert_false # pylint: disable=no-name-in-module
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from opaque_keys.edx.keys import CourseKey
from verify_student.models import (
SoftwareSecurePhotoVerification,
VerificationException, VerificationCheckpoint,
VerificationStatus, SkippedReverification,
VerificationDeadline
)
FAKE_SETTINGS = {
"SOFTWARE_SECURE": {
"FACE_IMAGE_AES_KEY": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
"RSA_PUBLIC_KEY": """-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu2fUn20ZQtDpa1TKeCA/
rDA2cEeFARjEr41AP6jqP/k3O7TeqFX6DgCBkxcjojRCs5IfE8TimBHtv/bcSx9o
7PANTq/62ZLM9xAMpfCcU6aAd4+CVqQkXSYjj5TUqamzDFBkp67US8IPmw7I2Gaa
tX8ErZ9D7ieOJ8/0hEiphHpCZh4TTgGuHgjon6vMV8THtq3AQMaAQ/y5R3V7Lezw
dyZCM9pBcvcH+60ma+nNg8GVGBAW/oLxILBtg+T3PuXSUvcu/r6lUFMHk55pU94d
9A/T8ySJm379qU24ligMEetPk1o9CUasdaI96xfXVDyFhrzrntAmdD+HYCSPOQHz
iwIDAQAB
-----END PUBLIC KEY-----""",
"API_URL": "http://localhost/verify_student/fake_endpoint",
"AWS_ACCESS_KEY": "FAKEACCESSKEY",
"AWS_SECRET_KEY": "FAKESECRETKEY",
"S3_BUCKET": "fake-bucket"
}
}
class MockKey(object):
"""
Mocking a boto S3 Key object. It's a really dumb mock because once we
write data to S3, we never read it again. We simply generate a link to it
and pass that to Software Secure. Because of that, we don't even implement
the ability to pull back previously written content in this mock.
Testing that the encryption/decryption roundtrip on the data works is in
test_ssencrypt.py
"""
def __init__(self, bucket):
self.bucket = bucket
def set_contents_from_string(self, contents):
self.contents = contents
def generate_url(self, duration):
return "http://fake-edx-s3.edx.org/"
class MockBucket(object):
"""Mocking a boto S3 Bucket object."""
def __init__(self, name):
self.name = name
class MockS3Connection(object):
"""Mocking a boto S3 Connection"""
def __init__(self, access_key, secret_key):
pass
def get_bucket(self, bucket_name):
return MockBucket(bucket_name)
def mock_software_secure_post(url, headers=None, data=None, **kwargs):
"""
Mocks our interface when we post to Software Secure. Does basic assertions
on the fields we send over to make sure we're not missing headers or giving
total garbage.
"""
data_dict = json.loads(data)
# Basic sanity checking on the keys
EXPECTED_KEYS = [
"EdX-ID", "ExpectedName", "PhotoID", "PhotoIDKey", "SendResponseTo",
"UserPhoto", "UserPhotoKey",
]
for key in EXPECTED_KEYS:
assert_true(
data_dict.get(key),
"'{}' must be present and not blank in JSON submitted to Software Secure".format(key)
)
# The keys should be stored as Base64 strings, i.e. this should not explode
photo_id_key = data_dict["PhotoIDKey"].decode("base64")
user_photo_key = data_dict["UserPhotoKey"].decode("base64")
response = requests.Response()
response.status_code = 200
return response
def mock_software_secure_post_error(url, headers=None, data=None, **kwargs):
"""
Simulates what happens if our post to Software Secure is rejected, for
whatever reason.
"""
response = requests.Response()
response.status_code = 400
return response
def mock_software_secure_post_unavailable(url, headers=None, data=None, **kwargs):
"""Simulates a connection failure when we try to submit to Software Secure."""
raise requests.exceptions.ConnectionError
# Lots of patching to stub in our own settings, S3 substitutes, and HTTP posting
@patch.dict(settings.VERIFY_STUDENT, FAKE_SETTINGS)
@patch('verify_student.models.S3Connection', new=MockS3Connection)
@patch('verify_student.models.Key', new=MockKey)
@patch('verify_student.models.requests.post', new=mock_software_secure_post)
@ddt.ddt
class TestPhotoVerification(ModuleStoreTestCase):
def test_state_transitions(self):
"""
Make sure we can't make unexpected status transitions.
The status transitions we expect are::
→ → → must_retry
↑ ↑ ↓
created → ready → submitted → approved
↓ ↑ ↓
↓ → → denied
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
assert_equals(attempt.status, "created")
# These should all fail because we're in the wrong starting state.
assert_raises(VerificationException, attempt.submit)
assert_raises(VerificationException, attempt.approve)
assert_raises(VerificationException, attempt.deny)
# Now let's fill in some values so that we can pass the mark_ready() call
attempt.mark_ready()
assert_equals(attempt.status, "ready")
# ready (can't approve or deny unless it's "submitted")
assert_raises(VerificationException, attempt.approve)
assert_raises(VerificationException, attempt.deny)
DENY_ERROR_MSG = '[{"photoIdReasons": ["Not provided"]}]'
# must_retry
attempt.status = "must_retry"
attempt.system_error("System error")
attempt.approve()
attempt.status = "must_retry"
attempt.deny(DENY_ERROR_MSG)
# submitted
attempt.status = "submitted"
attempt.deny(DENY_ERROR_MSG)
attempt.status = "submitted"
attempt.approve()
# approved
assert_raises(VerificationException, attempt.submit)
attempt.approve() # no-op
attempt.system_error("System error") # no-op, something processed it without error
attempt.deny(DENY_ERROR_MSG)
# denied
assert_raises(VerificationException, attempt.submit)
attempt.deny(DENY_ERROR_MSG) # no-op
attempt.system_error("System error") # no-op, something processed it without error
attempt.approve()
def test_name_freezing(self):
"""
You can change your name prior to marking a verification attempt ready,
but changing your name afterwards should not affect the value in the
in the attempt record. Basically, we want to always know what your name
was when you submitted it.
"""
user = UserFactory.create()
user.profile.name = u"Jack \u01B4" # gratuious non-ASCII char to test encodings
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = u"Clyde \u01B4"
attempt.mark_ready()
user.profile.name = u"Rusty \u01B4"
assert_equals(u"Clyde \u01B4", attempt.name)
def create_and_submit(self):
"""Helper method to create a generic submission and send it."""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
user.profile.name = u"Rust\u01B4"
attempt.upload_face_image("Just pretend this is image data")
attempt.upload_photo_id_image("Hey, we're a photo ID")
attempt.mark_ready()
attempt.submit()
return attempt
def test_submissions(self):
"""Test that we set our status correctly after a submission."""
# Basic case, things go well.
attempt = self.create_and_submit()
assert_equals(attempt.status, "submitted")
# We post, but Software Secure doesn't like what we send for some reason
with patch('verify_student.models.requests.post', new=mock_software_secure_post_error):
attempt = self.create_and_submit()
assert_equals(attempt.status, "must_retry")
# We try to post, but run into an error (in this case a network connection error)
with patch('verify_student.models.requests.post', new=mock_software_secure_post_unavailable):
attempt = self.create_and_submit()
assert_equals(attempt.status, "must_retry")
def test_active_for_user(self):
"""
Make sure we can retrieve a user's active (in progress) verification
attempt.
"""
user = UserFactory.create()
# This user has no active attempt at the moment...
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))
# Create an attempt and mark it ready...
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.mark_ready()
assert_equals(attempt, SoftwareSecurePhotoVerification.active_for_user(user))
# A new user won't see this...
user2 = UserFactory.create()
user2.save()
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user2))
# If it's got a different status, it doesn't count
for status in ["submitted", "must_retry", "approved", "denied"]:
attempt.status = status
attempt.save()
assert_is_none(SoftwareSecurePhotoVerification.active_for_user(user))
# But if we create yet another one and mark it ready, it passes again.
attempt_2 = SoftwareSecurePhotoVerification(user=user)
attempt_2.mark_ready()
assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))
# And if we add yet another one with a later created time, we get that
# one instead. We always want the most recent attempt marked ready()
attempt_3 = SoftwareSecurePhotoVerification(
user=user,
created_at=attempt_2.created_at + timedelta(days=1)
)
attempt_3.save()
# We haven't marked attempt_3 ready yet, so attempt_2 still wins
assert_equals(attempt_2, SoftwareSecurePhotoVerification.active_for_user(user))
# Now we mark attempt_3 ready and expect it to come back
attempt_3.mark_ready()
assert_equals(attempt_3, SoftwareSecurePhotoVerification.active_for_user(user))
def test_user_is_verified(self):
"""
Test to make sure we correctly answer whether a user has been verified.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.save()
# If it's any of these, they're not verified...
for status in ["created", "ready", "denied", "submitted", "must_retry"]:
attempt.status = status
attempt.save()
assert_false(SoftwareSecurePhotoVerification.user_is_verified(user), status)
attempt.status = "approved"
attempt.save()
assert_true(SoftwareSecurePhotoVerification.user_is_verified(user), attempt.status)
def test_user_has_valid_or_pending(self):
"""
Determine whether we have to prompt this user to verify, or if they've
already at least initiated a verification submission.
"""
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
# If it's any of these statuses, they don't have anything outstanding
for status in ["created", "ready", "denied"]:
attempt.status = status
attempt.save()
assert_false(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)
# Any of these statuses, and they do have something pending. Note the benefit
# of the doubt we're giving -- must_retry and submitted both count until we hear otherwise
for status in ["submitted", "must_retry", "approved"]:
attempt.status = status
attempt.save()
assert_true(SoftwareSecurePhotoVerification.user_has_valid_or_pending(user), status)
def test_user_status(self):
# test for correct status when no error returned
user = UserFactory.create()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('none', ''))
# test for when one has been created
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'approved'
attempt.save()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('approved', ''))
# create another one for the same user, make sure the right one is
# returned
attempt2 = SoftwareSecurePhotoVerification(user=user)
attempt2.status = 'denied'
attempt2.error_msg = '[{"photoIdReasons": ["Not provided"]}]'
attempt2.save()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('approved', ''))
# now delete the first one and verify that the denial is being handled
# properly
attempt.delete()
status = SoftwareSecurePhotoVerification.user_status(user)
self.assertEquals(status, ('must_reverify', "No photo ID was provided."))
def test_parse_error_msg_success(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'denied'
attempt.error_msg = '[{"photoIdReasons": ["Not provided"]}]'
parsed_error_msg = attempt.parsed_error_msg()
self.assertEquals("No photo ID was provided.", parsed_error_msg)
def test_parse_error_msg_failure(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.status = 'denied'
# when we can't parse into json
bad_messages = {
'Not Provided',
'[{"IdReasons": ["Not provided"]}]',
'{"IdReasons": ["Not provided"]}',
u'[{"ïḋṚëäṡöṅṡ": ["Ⓝⓞⓣ ⓟⓡⓞⓥⓘⓓⓔⓓ "]}]',
}
for msg in bad_messages:
attempt.error_msg = msg
parsed_error_msg = attempt.parsed_error_msg()
self.assertEquals(parsed_error_msg, "There was an error verifying your ID photos.")
def test_active_at_datetime(self):
user = UserFactory.create()
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Not active before the created date
before = attempt.created_at - timedelta(seconds=1)
self.assertFalse(attempt.active_at_datetime(before))
# Active immediately after created date
after_created = attempt.created_at + timedelta(seconds=1)
self.assertTrue(attempt.active_at_datetime(after_created))
# Active immediately before expiration date
expiration = attempt.created_at + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
self.assertTrue(attempt.active_at_datetime(before_expiration))
# Not active after the expiration date
after = expiration + timedelta(seconds=1)
self.assertFalse(attempt.active_at_datetime(after))
def test_verification_for_datetime(self):
user = UserFactory.create()
now = datetime.now(pytz.UTC)
# No attempts in the query set, so should return None
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(now, query)
self.assertIs(result, None)
# Should also return None if no deadline specified
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(None, query)
self.assertIs(result, None)
# Make an attempt
attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Before the created date, should get no results
before = attempt.created_at - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(before, query)
self.assertIs(result, None)
# Immediately after the created date, should get the attempt
after_created = attempt.created_at + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(after_created, query)
self.assertEqual(result, attempt)
# If no deadline specified, should return first available
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(None, query)
self.assertEqual(result, attempt)
# Immediately before the expiration date, should get the attempt
expiration = attempt.created_at + timedelta(days=settings.VERIFY_STUDENT["DAYS_GOOD_FOR"])
before_expiration = expiration - timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(before_expiration, query)
self.assertEqual(result, attempt)
# Immediately after the expiration date, should not get the attempt
after = expiration + timedelta(seconds=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(after, query)
self.assertIs(result, None)
# Create a second attempt in the same window
second_attempt = SoftwareSecurePhotoVerification.objects.create(user=user)
# Now we should get the newer attempt
deadline = second_attempt.created_at + timedelta(days=1)
query = SoftwareSecurePhotoVerification.objects.filter(user=user)
result = SoftwareSecurePhotoVerification.verification_for_datetime(deadline, query)
self.assertEqual(result, second_attempt)
@ddt.unpack
@ddt.data(
{'enrollment_mode': 'honor', 'status': None, 'output': 'N/A'},
{'enrollment_mode': 'verified', 'status': False, 'output': 'Not ID Verified'},
{'enrollment_mode': 'verified', 'status': True, 'output': 'ID Verified'},
)
def test_verification_status_for_user(self, enrollment_mode, status, output):
"""
Verify verification_status_for_user returns correct status.
"""
user = UserFactory.create()
course = CourseFactory.create()
with patch('verify_student.models.SoftwareSecurePhotoVerification.user_is_verified') as mock_verification:
mock_verification.return_value = status
status = SoftwareSecurePhotoVerification.verification_status_for_user(user, course.id, enrollment_mode)
self.assertEqual(status, output)
@ddt.ddt
class VerificationCheckpointTest(ModuleStoreTestCase):
"""Tests for the VerificationCheckpoint model. """
def setUp(self):
super(VerificationCheckpointTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.checkpoint_midterm = u'i4x://{org}/{course}/edx-reverification-block/midterm_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
self.checkpoint_final = u'i4x://{org}/{course}/edx-reverification-block/final_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
@ddt.data('midterm', 'final')
def test_get_or_create_verification_checkpoint(self, checkpoint):
"""
Test that a reverification checkpoint is created properly.
"""
checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/{checkpoint}'.format(
org=self.course.id.org, course=self.course.id.course, checkpoint=checkpoint
)
# create the 'VerificationCheckpoint' checkpoint
verification_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=checkpoint_location
)
self.assertEqual(
VerificationCheckpoint.get_or_create_verification_checkpoint(self.course.id, checkpoint_location),
verification_checkpoint
)
def test_get_or_create_verification_checkpoint_for_not_existing_values(self):
# Retrieving a checkpoint that doesn't yet exist will create it
location = u'i4x://edX/DemoX/edx-reverification-block/invalid_location'
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(self.course.id, location)
self.assertIsNot(checkpoint, None)
self.assertEqual(checkpoint.course_id, self.course.id)
self.assertEqual(checkpoint.checkpoint_location, location)
def test_get_or_create_integrity_error(self):
# Create the checkpoint
VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.checkpoint_midterm,
)
# Simulate that the get-or-create operation raises an IntegrityError
# This can happen when two processes both try to get-or-create at the same time
# when the database is set to REPEATABLE READ.
with patch.object(VerificationCheckpoint.objects, "get_or_create") as mock_get_or_create:
mock_get_or_create.side_effect = IntegrityError
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(
self.course.id,
self.checkpoint_midterm
)
# The checkpoint should be retrieved without error
self.assertEqual(checkpoint.course_id, self.course.id)
self.assertEqual(checkpoint.checkpoint_location, self.checkpoint_midterm)
def test_unique_together_constraint(self):
"""
Test the unique together constraint.
"""
# create the VerificationCheckpoint checkpoint
VerificationCheckpoint.objects.create(course_id=self.course.id, checkpoint_location=self.checkpoint_midterm)
# test creating the VerificationCheckpoint checkpoint with same course
# id and checkpoint name
with self.assertRaises(IntegrityError):
VerificationCheckpoint.objects.create(course_id=self.course.id, checkpoint_location=self.checkpoint_midterm)
def test_add_verification_attempt_software_secure(self):
"""
Test adding Software Secure photo verification attempts for the
reverification checkpoints.
"""
# adding two checkpoints.
first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.checkpoint_midterm
)
second_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id, checkpoint_location=self.checkpoint_final
)
# make an attempt for the 'first_checkpoint'
first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.assertEqual(first_checkpoint.photo_verification.count(), 1)
# make another attempt for the 'first_checkpoint'
first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
self.assertEqual(first_checkpoint.photo_verification.count(), 2)
# make new attempt for the 'second_checkpoint'
attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user)
second_checkpoint.add_verification_attempt(attempt)
self.assertEqual(second_checkpoint.photo_verification.count(), 1)
# remove the attempt from 'second_checkpoint'
second_checkpoint.photo_verification.remove(attempt)
self.assertEqual(second_checkpoint.photo_verification.count(), 0)
@ddt.ddt
class VerificationStatusTest(ModuleStoreTestCase):
""" Tests for the VerificationStatus model. """
def setUp(self):
super(VerificationStatusTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
self.first_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/first_checkpoint_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
self.first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.first_checkpoint_location
)
self.second_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/second_checkpoint_uuid'.\
format(org=self.course.id.org, course=self.course.id.course)
self.second_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.second_checkpoint_location
)
@ddt.data('submitted', "approved", "denied", "error")
def test_add_verification_status(self, status):
""" Adding verification status using the class method. """
# adding verification status
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status=status
)
# test the status from database
result = VerificationStatus.objects.filter(checkpoint=self.first_checkpoint)[0]
self.assertEqual(result.status, status)
self.assertEqual(result.user, self.user)
@ddt.data("approved", "denied", "error")
def test_add_status_from_checkpoints(self, status):
"""Test verification status for reverification checkpoints after
submitting software secure photo verification.
"""
# add initial verification status for checkpoints
initial_status = "submitted"
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status=initial_status
)
VerificationStatus.add_verification_status(
checkpoint=self.second_checkpoint,
user=self.user,
status=initial_status
)
# now add verification status for multiple checkpoints
VerificationStatus.add_status_from_checkpoints(
checkpoints=[self.first_checkpoint, self.second_checkpoint], user=self.user, status=status
)
# test that verification status entries with new status have been added
# for both checkpoints
result = VerificationStatus.objects.filter(user=self.user, checkpoint=self.first_checkpoint)
self.assertEqual(len(result), len(self.first_checkpoint.checkpoint_status.all()))
self.assertEqual(
list(result.values_list('checkpoint__checkpoint_location', flat=True)),
list(self.first_checkpoint.checkpoint_status.values_list('checkpoint__checkpoint_location', flat=True))
)
result = VerificationStatus.objects.filter(user=self.user, checkpoint=self.second_checkpoint)
self.assertEqual(len(result), len(self.second_checkpoint.checkpoint_status.all()))
self.assertEqual(
list(result.values_list('checkpoint__checkpoint_location', flat=True)),
list(self.second_checkpoint.checkpoint_status.values_list('checkpoint__checkpoint_location', flat=True))
)
def test_get_location_id(self):
"""
Getting location id for a specific checkpoint.
"""
# creating software secure attempt against checkpoint
self.first_checkpoint.add_verification_attempt(SoftwareSecurePhotoVerification.objects.create(user=self.user))
# add initial verification status for checkpoint
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status='submitted',
)
attempt = SoftwareSecurePhotoVerification.objects.filter(user=self.user)
self.assertIsNotNone(VerificationStatus.get_location_id(attempt))
self.assertEqual(VerificationStatus.get_location_id(None), '')
def test_get_user_attempts(self):
"""
Test retrieving the number of verification attempts a user has made at a checkpoint.
"""
VerificationStatus.add_verification_status(
checkpoint=self.first_checkpoint,
user=self.user,
status='submitted'
)
actual_attempts = VerificationStatus.get_user_attempts(
self.user.id,
self.course.id,
self.first_checkpoint_location
)
self.assertEqual(actual_attempts, 1)
class SkippedReverificationTest(ModuleStoreTestCase):
"""
Tests for the SkippedReverification model.
"""
def setUp(self):
super(SkippedReverificationTest, self).setUp()
self.user = UserFactory.create()
self.course = CourseFactory.create()
dummy_checkpoint_location = u'i4x://edX/DemoX/edx-reverification-block/midterm_uuid'
self.checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=dummy_checkpoint_location
)
def test_add_skipped_attempts(self):
"""
Test 'add_skipped_reverification_attempt' method.
"""
# add verification status
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
# test the status of skipped reverification from database
result = SkippedReverification.objects.filter(course_id=self.course.id)[0]
self.assertEqual(result.checkpoint, self.checkpoint)
self.assertEqual(result.user, self.user)
self.assertEqual(result.course_id, self.course.id)
def test_unique_constraint(self):
"""Test that adding skipped re-verification with same user and course
id will raise an 'IntegrityError' exception.
"""
# add verification object
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
with self.assertRaises(IntegrityError):
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
# create skipped attempt for different user
user2 = UserFactory.create()
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=user2.id, course_id=unicode(self.course.id)
)
# test the status of skipped reverification from database
result = SkippedReverification.objects.filter(user=user2)[0]
self.assertEqual(result.checkpoint, self.checkpoint)
self.assertEqual(result.user, user2)
self.assertEqual(result.course_id, self.course.id)
def test_check_user_skipped_reverification_exists(self):
"""
Test the 'check_user_skipped_reverification_exists' method's response.
"""
# add verification status
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.checkpoint, user_id=self.user.id, course_id=unicode(self.course.id)
)
self.assertTrue(
SkippedReverification.check_user_skipped_reverification_exists(
user_id=self.user.id,
course_id=self.course.id
)
)
user2 = UserFactory.create()
self.assertFalse(
SkippedReverification.check_user_skipped_reverification_exists(
user_id=user2.id,
course_id=self.course.id
)
)
class VerificationDeadlineTest(TestCase):
"""
Tests for the VerificationDeadline model.
"""
def test_caching(self):
deadlines = {
CourseKey.from_string("edX/DemoX/Fall"): datetime.now(pytz.UTC),
CourseKey.from_string("edX/DemoX/Spring"): datetime.now(pytz.UTC) + timedelta(days=1)
}
course_keys = deadlines.keys()
# Initially, no deadlines are set
with self.assertNumQueries(1):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, {})
# Create the deadlines
for course_key, deadline in deadlines.iteritems():
VerificationDeadline.objects.create(
course_key=course_key,
deadline=deadline,
)
# Warm the cache
with self.assertNumQueries(1):
VerificationDeadline.deadlines_for_courses(course_keys)
# Load the deadlines from the cache
with self.assertNumQueries(0):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, deadlines)
# Delete the deadlines
VerificationDeadline.objects.all().delete()
# Verify that the deadlines are updated correctly
with self.assertNumQueries(1):
all_deadlines = VerificationDeadline.deadlines_for_courses(course_keys)
self.assertEqual(all_deadlines, {})
| agpl-3.0 |
HellerCommaA/flask-angular | lib/python2.7/site-packages/sqlalchemy/connectors/zxJDBC.py | 18 | 1882 | # connectors/zxJDBC.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sys
from . import Connector
class ZxJDBCConnector(Connector):
driver = 'zxjdbc'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_unicode_binds = True
supports_unicode_statements = sys.version > '2.5.0+'
description_encoding = None
default_paramstyle = 'qmark'
jdbc_db_name = None
jdbc_driver_name = None
@classmethod
def dbapi(cls):
from com.ziclix.python.sql import zxJDBC
return zxJDBC
def _driver_kwargs(self):
"""Return kw arg dict to be sent to connect()."""
return {}
def _create_jdbc_url(self, url):
"""Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
url.port is not None
and ':%s' % url.port or '',
url.database)
def create_connect_args(self, url):
opts = self._driver_kwargs()
opts.update(url.query)
return [
[self._create_jdbc_url(url),
url.username, url.password,
self.jdbc_driver_name],
opts]
def is_disconnect(self, e, connection, cursor):
if not isinstance(e, self.dbapi.ProgrammingError):
return False
e = str(e)
return 'connection is closed' in e or 'cursor is closed' in e
def _get_server_version_info(self, connection):
# use connection.connection.dbversion, and parse appropriately
# to get a tuple
raise NotImplementedError()
| mit |
x007007007/supervisor-monitor | versioneer.py | 386 | 68611 |
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
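A rough sketch of that `[versioneer]` section (the keys mirror what
`get_config_from_root()` below reads; the paths and prefixes here are purely
illustrative and should be adapted to your project):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/yourproject/_version.py
    versionfile_build = yourproject/_version.py
    tag_prefix =
    parentdir_prefix = yourproject-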
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
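As a rough sketch of the usual hookup (see INSTALL.md for the authoritative
steps; the project name below is illustrative), `setup.py` ends up delegating
to this module:

    import versioneer
    from setuptools import setup

    setup(
        name="yourproject",  # illustrative
        version=versioneer.get_version(),
        cmdclass=versioneer.get_cmdclass(),
    )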
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
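As a minimal sketch (assuming `versioneer.py` sits next to your `setup.py`),
the same dictionary can be inspected directly, e.g. from `setup.py`:

    import versioneer

    info = versioneer.get_versions()
    # e.g. {'version': '0.11+2.g1076c97.dirty', 'full-revisionid': '1076c9...',
    #       'dirty': True, 'error': None, 'date': '...'}
    print(info['version'], info['dirty'])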
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
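As a concrete (hypothetical) illustration of the default style, using the
`render_pep440()` helper defined later in this file:

    pieces = {"closest-tag": "0.11", "distance": 2, "short": "1076c97",
              "dirty": True, "long": "1076c97...", "error": None}
    # render_pep440(pieces) -> "0.11+2.g1076c97.dirty"
    # with distance == 0 and a clean tree, it would render just "0.11"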
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications which then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to locate the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
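# Illustrative example (hypothetical values): in a checkout that is 3 commits
# past the "v1.2.0" tag and has uncommitted changes, 'git describe' yields
# "v1.2.0-3-g1076c97-dirty" and this function returns roughly:
#
#     {"long": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
#      "short": "1076c97",
#      "closest-tag": "1.2.0",
#      "distance": 3,
#      "dirty": True,
#      "error": None,
#      "date": "2018-01-05T12:00:00-0500"}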
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
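# For example (hypothetical project layout): with versionfile_source set to
# "src/myproject/_version.py", do_vcs_install() ensures .gitattributes contains
#
#     src/myproject/_version.py export-subst
#
# and then stages MANIFEST.in, the version file, __init__.py (if given),
# versioneer.py and .gitattributes with 'git add'.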
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
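# Illustrative outputs (hypothetical pieces of the shape produced by
# git_pieces_from_vcs above; only the keys used by this renderer are shown):
#
#     pieces = {"closest-tag": "1.2.0", "distance": 3,
#               "short": "1076c97", "dirty": True}
#     render_pep440(pieces)   # -> "1.2.0+3.g1076c97.dirty"
#
#     pieces = {"closest-tag": "1.2.0", "distance": 0,
#               "short": "1076c97", "dirty": False}
#     render_pep440(pieces)   # -> "1.2.0"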
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
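# For comparison, the same hypothetical pieces
# {"closest-tag": "1.2.0", "distance": 3, "short": "1076c97", "dirty": True}
# render as:
#
#     pep440             -> "1.2.0+3.g1076c97.dirty"
#     pep440-pre         -> "1.2.0.post.dev3"          (dirty state not encoded)
#     pep440-post        -> "1.2.0.post3.dev0+g1076c97"
#     pep440-old         -> "1.2.0.post3.dev0"
#     git-describe       -> "1.2.0-3-g1076c97-dirty"
#     git-describe-long  -> "1.2.0-3-g1076c97-dirty"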
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| mit |
wilvk/ansible | lib/ansible/modules/cloud/amazon/ec2_lc_facts.py | 20 | 7274 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_lc_facts
short_description: Gather facts about AWS Autoscaling Launch Configurations
description:
- Gather facts about AWS Autoscaling Launch Configurations
version_added: "2.3"
author: "Loïc Latreille (@psykotox)"
requirements: [ boto3 ]
options:
name:
description:
      - A name or a list of names to match.
required: false
default: []
sort:
description:
      - Optional attribute with which to sort the results.
choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all launch configurations
- ec2_lc_facts:
# Gather facts about launch configuration with name "example"
- ec2_lc_facts:
name: example
# Gather facts sorted by created_time from most recent to least recent
- ec2_lc_facts:
sort: created_time
sort_order: descending
'''
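# Additional illustrative usage (hypothetical playbook snippet, kept as a
# comment; task and variable names are examples only):
#
# - ec2_lc_facts:
#     sort: created_time
#     sort_order: descending
#     sort_end: 1
#   register: newest_lc
# - debug:
#     msg: "{{ newest_lc.launch_configurations[0].launch_configuration_name }}"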
RETURN = '''
block_device_mapping:
description: Block device mapping for the instances of launch configuration
type: list
returned: always
sample: "[{
'device_name': '/dev/xvda':,
'ebs': {
'delete_on_termination': true,
'volume_size': 8,
'volume_type': 'gp2'
}]"
classic_link_vpc_security_groups:
description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
type: string
returned: always
sample:
created_time:
description: The creation date and time for the launch configuration
type: string
returned: always
sample: "2016-05-27T13:47:44.216000+00:00"
ebs_optimized:
    description: EBS I/O optimized (true) or not (false)
type: bool
returned: always
    sample: true
image_id:
description: ID of the Amazon Machine Image (AMI)
type: string
returned: always
sample: "ami-12345678"
instance_monitoring:
description: Launched with detailed monitoring or not
type: dict
returned: always
sample: "{
'enabled': true
}"
instance_type:
description: Instance type
type: string
returned: always
sample: "t2.micro"
kernel_id:
description: ID of the kernel associated with the AMI
type: string
returned: always
sample:
key_name:
description: Name of the key pair
type: string
returned: always
sample: "user_app"
launch_configuration_arn:
description: Amazon Resource Name (ARN) of the launch configuration
type: string
returned: always
sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
launch_configuration_name:
description: Name of the launch configuration
type: string
returned: always
sample: "lc-app"
ramdisk_id:
description: ID of the RAM disk associated with the AMI
type: string
returned: always
sample:
security_groups:
    description: Security groups associated with the launch configuration
type: list
returned: always
sample: "[
'web'
]"
user_data:
description: User data available
type: string
returned: always
sample:
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
def list_launch_configs(connection, module):
launch_config_name = module.params.get("name")
sort = module.params.get('sort')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
try:
pg = connection.get_paginator('describe_launch_configurations')
launch_configs = pg.paginate(LaunchConfigurationNames=launch_config_name).build_full_result()
except ClientError as e:
module.fail_json(msg=e.message)
snaked_launch_configs = []
for launch_config in launch_configs['LaunchConfigurations']:
snaked_launch_configs.append(camel_dict_to_snake_dict(launch_config))
for launch_config in snaked_launch_configs:
        # keys were snake_cased above, so the datetime lives under 'created_time'
        if 'created_time' in launch_config:
            launch_config['created_time'] = str(launch_config['created_time'])
if sort:
snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
try:
if sort and sort_start and sort_end:
snaked_launch_configs = snaked_launch_configs[int(sort_start):int(sort_end)]
elif sort and sort_start:
snaked_launch_configs = snaked_launch_configs[int(sort_start):]
elif sort and sort_end:
snaked_launch_configs = snaked_launch_configs[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(launch_configurations=snaked_launch_configs)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=False, default=[], type='list'),
sort=dict(required=False, default=None,
choices=['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
sort_order=dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start=dict(required=False),
sort_end=dict(required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_launch_configs(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
akhilaananthram/nupic | examples/opf/experiments/spatial_classification/base/description.py | 32 | 14783 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalClassification',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
      # at each step; 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'field1': {
'fieldname': u'field1',
'n': 121,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
u'classification': {
'classifierOnly': True,
'fieldname': u'classification',
'n': 121,
'name': u'classification',
'type': 'SDRCategoryEncoder',
'w': 21},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
      # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': False,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
      # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : False,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '0',
},
'anomalyParams': {
u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': None
},
'trainSPNetOnlyIfRequested': False,
},
'dataSource': 'fillInBySubExperiment',
'errorMetric': 'fillInBySubExperiment'
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
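# Illustrative (hypothetical) sub-experiment override: a sibling description.py
# normally imports this base description and supplies a small config dict, e.g.
#
#     config = {'dataSource': 'file://path/to/data.csv',
#               'errorMetric': 'avg_err'}
#
# updateConfigFromSubConfig() above merges those keys into this base config,
# which is why 'dataSource' and 'errorMetric' default to 'fillInBySubExperiment'.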
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/frameworks/opf/jsonschema/stream_def.json.
#
'dataset' : { u'info': u'testSpatialClassification',
u'streams': [ { u'columns': [u'*'],
u'info': u'spatialClassification',
u'source': config['dataSource']}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{u'predictedField': u'classification', u'predictionSteps': [0]},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field='classification', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': config['errorMetric'],
'window': 100,
'steps': 0}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
grhawk/ASE | tools/ase/calculators/emt.py | 2 | 7399 | """Effective medium theory potential."""
from math import sqrt, exp, log
import numpy as np
from ase.data import chemical_symbols
from ase.units import Bohr
from ase.calculators.neighborlist import NeighborList
from ase.calculators.calculator import Calculator
parameters = {
# E0 s0 V0 eta2 kappa lambda n0
# eV bohr eV bohr^-1 bohr^-1 bohr^-1 bohr^-3
'Al': (-3.28, 3.00, 1.493, 1.240, 2.000, 1.169, 0.00700),
'Cu': (-3.51, 2.67, 2.476, 1.652, 2.740, 1.906, 0.00910),
'Ag': (-2.96, 3.01, 2.132, 1.652, 2.790, 1.892, 0.00547),
'Au': (-3.80, 3.00, 2.321, 1.674, 2.873, 2.182, 0.00703),
'Ni': (-4.44, 2.60, 3.673, 1.669, 2.757, 1.948, 0.01030),
'Pd': (-3.90, 2.87, 2.773, 1.818, 3.107, 2.155, 0.00688),
'Pt': (-5.85, 2.90, 4.067, 1.812, 3.145, 2.192, 0.00802),
# extra parameters - just for fun ...
'H': (-3.21, 1.31, 0.132, 2.652, 2.790, 3.892, 0.00547),
'C': (-3.50, 1.81, 0.332, 1.652, 2.790, 1.892, 0.01322),
'N': (-5.10, 1.88, 0.132, 1.652, 2.790, 1.892, 0.01222),
'O': (-4.60, 1.95, 0.332, 1.652, 2.790, 1.892, 0.00850)}
beta = 1.809 # (16 * pi / 3)**(1.0 / 3) / 2**0.5,
# but preserve historical rounding
class EMT(Calculator):
implemented_properties = ['energy', 'forces']
nolabel = True
def __init__(self):
Calculator.__init__(self)
def initialize(self, atoms):
self.par = {}
self.rc = 0.0
self.numbers = atoms.get_atomic_numbers()
maxseq = max(par[1] for par in parameters.values()) * Bohr
rc = self.rc = beta * maxseq * 0.5 * (sqrt(3) + sqrt(4))
rr = rc * 2 * sqrt(4) / (sqrt(3) + sqrt(4))
self.acut = np.log(9999.0) / (rr - rc)
for Z in self.numbers:
if Z not in self.par:
p = parameters[chemical_symbols[Z]]
s0 = p[1] * Bohr
eta2 = p[3] / Bohr
kappa = p[4] / Bohr
x = eta2 * beta * s0
gamma1 = 0.0
gamma2 = 0.0
for i, n in enumerate([12, 6, 24]):
r = s0 * beta * sqrt(i + 1)
x = n / (12 * (1.0 + exp(self.acut * (r - rc))))
gamma1 += x * exp(-eta2 * (r - beta * s0))
gamma2 += x * exp(-kappa / beta * (r - beta * s0))
self.par[Z] = {'E0': p[0],
's0': s0,
'V0': p[2],
'eta2': eta2,
'kappa': kappa,
'lambda': p[5] / Bohr,
'n0': p[6] / Bohr**3,
'rc': rc,
'gamma1': gamma1,
'gamma2': gamma2}
#if rc + 0.5 > self.rc:
# self.rc = rc + 0.5
self.ksi = {}
for s1, p1 in self.par.items():
self.ksi[s1] = {}
for s2, p2 in self.par.items():
#self.ksi[s1][s2] = (p2['n0'] / p1['n0'] *
# exp(eta1 * (p1['s0'] - p2['s0'])))
self.ksi[s1][s2] = p2['n0'] / p1['n0']
self.forces = np.empty((len(atoms), 3))
self.sigma1 = np.empty(len(atoms))
self.deds = np.empty(len(atoms))
self.nl = NeighborList([0.5 * self.rc + 0.25] * len(atoms),
self_interaction=False)
def calculate(self, atoms=None, properties=['energy'],
system_changes=['positions', 'numbers', 'cell',
'pbc', 'charges','magmoms']):
Calculator.calculate(self, atoms, properties, system_changes)
if 'numbers' in system_changes:
self.initialize(self.atoms)
positions = self.atoms.positions
numbers = self.atoms.numbers
cell = self.atoms.cell
self.nl.update(self.atoms)
self.energy = 0.0
self.sigma1[:] = 0.0
self.forces[:] = 0.0
natoms = len(self.atoms)
for a1 in range(natoms):
Z1 = numbers[a1]
p1 = self.par[Z1]
ksi = self.ksi[Z1]
neighbors, offsets = self.nl.get_neighbors(a1)
offsets = np.dot(offsets, cell)
for a2, offset in zip(neighbors, offsets):
d = positions[a2] + offset - positions[a1]
r = sqrt(np.dot(d, d))
if r < self.rc + 0.5:
Z2 = numbers[a2]
p2 = self.par[Z2]
self.interact1(a1, a2, d, r, p1, p2, ksi[Z2])
for a in range(natoms):
Z = numbers[a]
p = self.par[Z]
try:
ds = -log(self.sigma1[a] / 12) / (beta * p['eta2'])
except (OverflowError, ValueError):
self.deds[a] = 0.0
self.energy -= p['E0']
continue
x = p['lambda'] * ds
y = exp(-x)
z = 6 * p['V0'] * exp(-p['kappa'] * ds)
self.deds[a] = ((x * y * p['E0'] * p['lambda'] + p['kappa'] * z) /
(self.sigma1[a] * beta * p['eta2']))
self.energy += p['E0'] * ((1 + x) * y - 1) + z
for a1 in range(natoms):
Z1 = numbers[a1]
p1 = self.par[Z1]
ksi = self.ksi[Z1]
neighbors, offsets = self.nl.get_neighbors(a1)
offsets = np.dot(offsets, cell)
for a2, offset in zip(neighbors, offsets):
d = positions[a2] + offset - positions[a1]
r = sqrt(np.dot(d, d))
if r < self.rc + 0.5:
Z2 = numbers[a2]
p2 = self.par[Z2]
self.interact2(a1, a2, d, r, p1, p2, ksi[Z2])
self.results['energy'] = self.energy
self.results['forces'] = self.forces
def interact1(self, a1, a2, d, r, p1, p2, ksi):
x = exp(self.acut * (r - self.rc))
theta = 1.0 / (1.0 + x)
y1 = (0.5 * p1['V0'] * exp(-p2['kappa'] * (r / beta - p2['s0'])) *
ksi / p1['gamma2'] * theta)
y2 = (0.5 * p2['V0'] * exp(-p1['kappa'] * (r / beta - p1['s0'])) /
ksi / p2['gamma2'] * theta)
self.energy -= y1 + y2
f = ((y1 * p2['kappa'] + y2 * p1['kappa']) / beta +
(y1 + y2) * self.acut * theta * x) * d / r
self.forces[a1] += f
self.forces[a2] -= f
self.sigma1[a1] += (exp(-p2['eta2'] * (r - beta * p2['s0'])) *
ksi * theta / p1['gamma1'])
self.sigma1[a2] += (exp(-p1['eta2'] * (r - beta * p1['s0'])) /
ksi * theta / p2['gamma1'])
def interact2(self, a1, a2, d, r, p1, p2, ksi):
x = exp(self.acut * (r - self.rc))
theta = 1.0 / (1.0 + x)
y1 = (exp(-p2['eta2'] * (r - beta * p2['s0'])) *
ksi / p1['gamma1'] * theta * self.deds[a1])
y2 = (exp(-p1['eta2'] * (r - beta * p1['s0'])) /
ksi / p2['gamma1'] * theta * self.deds[a2])
f = ((y1 * p2['eta2'] + y2 * p1['eta2']) +
(y1 + y2) * self.acut * theta * x) * d / r
self.forces[a1] -= f
self.forces[a2] += f
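# Minimal illustrative usage (hypothetical; the distance between the two atoms is
# an arbitrary example value, and the API names follow the ASE version this
# calculator ships with):
#
#     from ase import Atoms
#     atoms = Atoms('Cu2', positions=[[0.0, 0.0, 0.0], [0.0, 0.0, 2.5]])
#     atoms.set_calculator(EMT())
#     print(atoms.get_potential_energy())
#     print(atoms.get_forces())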
| gpl-2.0 |
charbeljc/OCB | addons/hr/res_config.py | 377 | 3452 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_config_settings(osv.osv_memory):
_name = 'hr.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_hr_timesheet_sheet': fields.boolean('Allow timesheets validation by managers',
help ="""This installs the module hr_timesheet_sheet."""),
'module_hr_attendance': fields.boolean('Install attendances feature',
help ="""This installs the module hr_attendance."""),
'module_hr_timesheet': fields.boolean('Manage timesheets',
help ="""This installs the module hr_timesheet."""),
'module_hr_holidays': fields.boolean('Manage holidays, leaves and allocation requests',
help ="""This installs the module hr_holidays."""),
'module_hr_expense': fields.boolean('Manage employees expenses',
help ="""This installs the module hr_expense."""),
'module_hr_recruitment': fields.boolean('Manage the recruitment process',
help ="""This installs the module hr_recruitment."""),
'module_hr_contract': fields.boolean('Record contracts per employee',
help ="""This installs the module hr_contract."""),
'module_hr_evaluation': fields.boolean('Organize employees periodic evaluation',
help ="""This installs the module hr_evaluation."""),
'module_hr_gamification': fields.boolean('Drive engagement with challenges and badges',
help ="""This installs the module hr_gamification."""),
'module_account_analytic_analysis': fields.boolean('Allow invoicing based on timesheets (the sale application will be installed)',
help ="""This installs the module account_analytic_analysis, which will install sales management too."""),
'module_hr_payroll': fields.boolean('Manage payroll',
help ="""This installs the module hr_payroll."""),
}
def onchange_hr_timesheet(self, cr, uid, ids, timesheet, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if timesheet:
return {'value': {'module_hr_attendance': True}}
return {}
def onchange_hr_attendance(self, cr, uid, ids, attendance, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if not attendance:
return {'value': {'module_hr_timesheet': False}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |