repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
bob-the-hamster/commandergenius | project/jni/python/src/Doc/conf.py | 32 | 5656 | # -*- coding: utf-8 -*-
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
sys.path.append(os.path.abspath('tools/sphinxext'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']
# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')
# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of files that shouldn't be included in the build.
unused_docs = [
'maclib/scrap',
'library/xmllib',
'library/xml.etree',
]
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# Options for HTML output
# -----------------------
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
'download': 'download.html',
'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'http://docs.python.org/dev'
# Additional static files.
html_static_path = ['tools/sphinxext/static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distutils/index', 'distutils.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('documenting/index', 'documenting.tex',
'Documenting Python', 'Georg Brandl', 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('install/index', 'install.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Using Python', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{[email protected]}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
latex_elements = {'inputenc': '\\usepackage[utf8x]{inputenc}'}
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
r'[T|t][k|K]',
r'Tix',
r'distutils.*',
]
coverage_ignore_functions = [
'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
'../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
# 'cfunction': [...]
}
| lgpl-2.1 |
lucashmorais/x-Bench | mozmill-env/python/Lib/lib2to3/pgen2/driver.py | 212 | 5164 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import StringIO
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError, e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def main(*args):
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""
if not args:
args = sys.argv[1:]
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format='%(message)s')
for gt in args:
load_grammar(gt, save=True, force=True)
return True
if __name__ == "__main__":
sys.exit(int(not main()))
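# Illustrative usage sketch (not part of the original module): load a grammar
# and parse a small snippet into a tree, as described in the module docstring.
# "Grammar.txt" is the default pgen grammar path assumed by load_grammar; any
# pgen-format grammar file works.
def _example_parse_string():
    g = load_grammar("Grammar.txt", save=False)
    driver = Driver(g)
    return driver.parse_string("x = 1\n")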
| mit |
benbovy/xarray-simlab | xsimlab/tests/fixture_process.py | 1 | 6399 | from textwrap import dedent
import attr
import pytest
import xsimlab as xs
from xsimlab.process import get_process_obj
@xs.process
class SomeProcess:
"""Just used for foreign variables in ExampleProcess."""
some_var = xs.variable(
groups="some_group", intent="out", global_name="some_global_var"
)
some_od_var = xs.on_demand(groups="some_group")
@some_od_var.compute
def compute_some_od_var(self):
return 1
@xs.process
class AnotherProcess:
"""Just used for foreign variables in ExampleProcess."""
another_var = xs.variable(
description="original description",
attrs={"unit": "m"},
global_name="another_global_var",
)
some_var = xs.foreign(SomeProcess, "some_var")
@xs.process
class ExampleProcess:
"""A process with complete interface for testing."""
in_var = xs.variable(dims=["x", ("x", "y")], description="input variable")
out_var = xs.variable(groups="example_group", intent="out")
inout_var = xs.variable(intent="inout", converter=int)
od_var = xs.on_demand()
obj_var = xs.any_object(description="arbitrary object")
in_foreign_var = xs.foreign(SomeProcess, "some_var")
in_foreign_var2 = xs.foreign(AnotherProcess, "some_var")
out_foreign_var = xs.foreign(AnotherProcess, "another_var", intent="out")
in_foreign_od_var = xs.foreign(SomeProcess, "some_od_var")
in_global_var = xs.global_ref("some_global_var")
out_global_var = xs.global_ref("another_global_var", intent="out")
group_var = xs.group("some_group")
group_dict_var = xs.group_dict("some_group")
other_attrib = attr.attrib(init=False, repr=False)
other_attr = "this is not a xsimlab variable attribute"
@od_var.compute
def compute_od_var(self):
return 0
@pytest.fixture
def example_process_obj():
return get_process_obj(ExampleProcess)
@pytest.fixture(scope="session")
def example_process_repr():
return dedent(
"""\
<ExampleProcess (xsimlab process)>
Variables:
in_var [in] ('x',) or ('x', 'y') input variable
out_var [out]
inout_var [inout]
od_var [out]
obj_var [out] arbitrary object
in_foreign_var [in] <--- SomeProcess.some_var
in_foreign_var2 [in] <--- AnotherProcess.some_var
out_foreign_var [out] ---> AnotherProcess.another_var
in_foreign_od_var [in] <--- SomeProcess.some_od_var
in_global_var [in] <--- <unknown>.<unknown>
out_global_var [out] ---> <unknown>.<unknown>
group_var [in] <--- group 'some_group'
group_dict_var [in] <--- group 'some_group'
Simulation stages:
*no stage implemented*
"""
)
@pytest.fixture(scope="session")
def in_var_details():
return dedent(
"""\
Input variable
Variable properties:
- type : ``variable``
- intent : ``in``
- dimensions : ('x',) or ('x', 'y')
"""
)
def _init_process(p_cls, p_name, model, state, state_keys=None, od_keys=None):
p_obj = get_process_obj(p_cls)
p_obj.__xsimlab_name__ = p_name
p_obj.__xsimlab_model__ = model
p_obj.__xsimlab_state__ = state
p_obj.__xsimlab_state_keys__ = state_keys or {}
p_obj.__xsimlab_od_keys__ = od_keys or {}
return p_obj
@pytest.fixture
def processes_with_state():
class FakeModel:
def __init__(self):
self._processes = {}
model = FakeModel()
state = {}
some_process = _init_process(
SomeProcess,
"some_process",
model,
state,
state_keys={"some_var": ("some_process", "some_var")},
od_keys={"some_od_var": ("some_process", "some_od_var")},
)
another_process = _init_process(
AnotherProcess,
"another_process",
model,
state,
state_keys={
"another_var": ("another_process", "another_var"),
"some_var": ("some_process", "some_var"),
},
)
example_process = _init_process(
ExampleProcess,
"example_process",
model,
state,
state_keys={
"in_var": ("example_process", "in_var"),
"out_var": ("example_process", "out_var"),
"inout_var": ("example_process", "inout_var"),
"obj_var": ("example_process", "obj_var"),
"in_foreign_var": ("some_process", "some_var"),
"in_foreign_var2": ("some_process", "some_var"),
"out_foreign_var": ("another_process", "another_var"),
"in_global_var": ("some_process", "some_var"),
"out_global_var": ("another_process", "another_var"),
"group_var": [("some_process", "some_var")],
"group_dict_var": [("some_process", "some_var")],
},
od_keys={
"od_var": ("example_process", "od_var"),
"in_foreign_od_var": ("some_process", "some_od_var"),
"group_var": [("some_process", "some_od_var")],
"group_dict_var": [("some_process", "some_od_var")],
},
)
model._processes.update(
{
"some_process": some_process,
"another_process": another_process,
"example_process": example_process,
}
)
return some_process, another_process, example_process
@pytest.fixture(scope="session")
def example_process_in_model_repr():
return dedent(
"""\
<ExampleProcess 'example_process' (xsimlab process)>
Variables:
in_var [in] ('x',) or ('x', 'y') input variable
out_var [out]
inout_var [inout]
od_var [out]
obj_var [out] arbitrary object
in_foreign_var [in] <--- some_process.some_var
in_foreign_var2 [in] <--- some_process.some_var
out_foreign_var [out] ---> another_process.another_var
in_foreign_od_var [in] <--- some_process.some_od_var
in_global_var [in] <--- some_process.some_var
out_global_var [out] ---> another_process.another_var
group_var [in] <--- group 'some_group'
group_dict_var [in] <--- group 'some_group'
Simulation stages:
*no stage implemented*
"""
)
| bsd-3-clause |
hyperized/ansible | test/units/modules/network/fortios/test_fortios_switch_controller_switch_interface_tag.py | 21 | 7873 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_switch_controller_switch_interface_tag
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_switch_controller_switch_interface_tag.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_switch_interface_tag_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_switch_interface_tag': {
'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_switch_interface_tag.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'name': 'default_name_3'
}
set_method_mock.assert_called_with('switch-controller', 'switch-interface-tag', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_switch_interface_tag_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_switch_interface_tag': {
'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_switch_interface_tag.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'name': 'default_name_3'
}
set_method_mock.assert_called_with('switch-controller', 'switch-interface-tag', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_switch_interface_tag_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_switch_interface_tag': {
'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_switch_interface_tag.fortios_switch_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller', 'switch-interface-tag', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_switch_interface_tag_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_switch_interface_tag': {
'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_switch_interface_tag.fortios_switch_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller', 'switch-interface-tag', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_switch_interface_tag_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_switch_interface_tag': {
'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_switch_interface_tag.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'name': 'default_name_3'
}
set_method_mock.assert_called_with('switch-controller', 'switch-interface-tag', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_switch_controller_switch_interface_tag_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_switch_interface_tag': {
'random_attribute_not_valid': 'tag',
'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_switch_interface_tag.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'name': 'default_name_3'
}
set_method_mock.assert_called_with('switch-controller', 'switch-interface-tag', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
ngovindaraj/Udacity_Projects | Data_Wrangling/osm_to_csv.py | 1 | 6585 | import csv
import codecs
import pprint
import re
import xml.etree.cElementTree as ET
import cerberus
import schema
# Converting the cleaned osm file to csv
OSM_PATH = "san-francisco_sample.osm"
# OSM_PATH = "san-francisco-modified.osm"
NODES_PATH = "nodes.csv"
NODE_TAGS_PATH = "nodes_tags.csv"
WAYS_PATH = "ways.csv"
WAY_NODES_PATH = "ways_nodes.csv"
WAY_TAGS_PATH = "ways_tags.csv"
LOWER_COLON = re.compile(r'^([a-z]|_)+:([a-z]|_)+')
PROBLEMCHARS = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
SCHEMA = schema.schema
# Make sure the field order in all csv files match the column order in the sql table schema
NODE_FIELDS = ['id', 'lat', 'lon', 'user', 'uid', 'version', 'changeset', 'timestamp']
NODE_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_FIELDS = ['id', 'user', 'uid', 'version', 'changeset', 'timestamp']
WAY_TAGS_FIELDS = ['id', 'key', 'value', 'type']
WAY_NODES_FIELDS = ['id', 'node_id', 'position']
# Common function to process 'nd' child element in 'way' element
def process_child_nd_element(element, child, nds, position):
nd = dict.fromkeys(['id', 'node_id', 'position']) # way-nd
nd['id'] = element.attrib['id']
nd['node_id'] = child.attrib['ref']
nd['position'] = position
position += 1
nds.append(nd)
return position
# Common function to process 'tag' child element in 'node/way' element
def process_child_tag_element(element, child, tags):
if PROBLEMCHARS.match(child.attrib['k']):
return
tag = dict.fromkeys(['type', 'key', 'id', 'value'])
tag['id'] = element.attrib['id']
tag['value'] = child.attrib['v']
if LOWER_COLON.match(child.attrib['k']):
tag['type'] = child.attrib['k'].split(':', 1)[0]
tag['key'] = child.attrib['k'].split(':', 1)[1]
else:
tag['type'] = 'regular'
tag['key'] = child.attrib['k']
tags.append(tag)
def shape_element(element):
"""Clean and shape node or way XML element to Python dict"""
node_attribs = dict.fromkeys(NODE_FIELDS)
way_attribs = dict.fromkeys(WAY_FIELDS)
tags = [] # List of child node (node/way) 'tag' dictionaries
nds = [] # List of child node (way) 'nd' dictionaries
if element.tag == 'node':
for attrib in node_attribs.iterkeys():
if attrib in element.attrib:
node_attribs[attrib] = element.attrib[attrib]
else: # node element is missing attrib we want, so drop node
return None
for child in element:
if child.tag == 'tag':
process_child_tag_element(element, child, tags)
elif element.tag == 'way':
for attrib in way_attribs.iterkeys():
if attrib in element.attrib:
way_attribs[attrib] = element.attrib[attrib]
else: # way element is missing attrib we want, so drop way
return None
pos = 0
for child in element:
if child.tag == 'tag':
process_child_tag_element(element, child, tags)
elif child.tag == 'nd':
pos = process_child_nd_element(element, child, nds, pos)
if element.tag == 'node':
return {'node': node_attribs, 'node_tags': tags}
elif element.tag == 'way':
return {'way': way_attribs, 'way_nodes': nds, 'way_tags': tags}
# ================================================== #
# Helper Functions #
# ================================================== #
def get_element(osm_file, tags=('node', 'way', 'relation')):
"""Yield element if it is the right type of tag"""
context = ET.iterparse(osm_file, events=('start', 'end'))
_, root = next(context)
for event, elem in context:
if event == 'end' and elem.tag in tags:
yield elem
root.clear()
def validate_element(element, validator, schema=SCHEMA):
"""Raise ValidationError if element does not match schema"""
if validator.validate(element, schema) is not True:
print element
field, errors = next(validator.errors.iteritems())
message_string = "\nElement of type '{0}' has following errors:\n{1}"
error_string = pprint.pformat(errors)
raise Exception(message_string.format(field, error_string))
class UnicodeDictWriter(csv.DictWriter, object):
"""Extend csv.DictWriter to handle Unicode input"""
def writerow(self, row):
super(UnicodeDictWriter, self).writerow({
k: (v.encode('utf-8') if isinstance(v, unicode) else v) for k, v in row.iteritems()
})
def writerows(self, rows):
for row in rows:
self.writerow(row)
# ================================================== #
# Main Function #
# ================================================== #
def process_map(file_in, validate):
"""Iteratively process each XML element and write to csv(s)"""
with codecs.open(NODES_PATH, 'w') as nodes_file, \
codecs.open(NODE_TAGS_PATH, 'w') as nodes_tags_file, \
codecs.open(WAYS_PATH, 'w') as ways_file, \
codecs.open(WAY_NODES_PATH, 'w') as way_nodes_file, \
codecs.open(WAY_TAGS_PATH, 'w') as way_tags_file:
nodes_writer = UnicodeDictWriter(nodes_file, NODE_FIELDS)
node_tags_writer = UnicodeDictWriter(nodes_tags_file, NODE_TAGS_FIELDS)
ways_writer = UnicodeDictWriter(ways_file, WAY_FIELDS)
way_nodes_writer = UnicodeDictWriter(way_nodes_file, WAY_NODES_FIELDS)
way_tags_writer = UnicodeDictWriter(way_tags_file, WAY_TAGS_FIELDS)
nodes_writer.writeheader()
node_tags_writer.writeheader()
ways_writer.writeheader()
way_nodes_writer.writeheader()
way_tags_writer.writeheader()
validator = cerberus.Validator()
for element in get_element(file_in, tags=('node', 'way')):
el = shape_element(element)
if el:
if validate is True:
validate_element(el, validator)
if element.tag == 'node':
nodes_writer.writerow(el['node'])
node_tags_writer.writerows(el['node_tags'])
elif element.tag == 'way':
ways_writer.writerow(el['way'])
way_nodes_writer.writerows(el['way_nodes'])
way_tags_writer.writerows(el['way_tags'])
if __name__ == '__main__':
# Note: Validation is ~ 10X slower. For the project consider using a small
# sample of the map when validating.
process_map(OSM_PATH, validate=True)
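# Illustrative sketch (not part of the original script): the dictionary shape
# returned by shape_element() for a single 'node' element. The attribute
# values below are invented for the example.
def _example_shape_node():
    xml = ('<node id="123" lat="37.77" lon="-122.41" user="mapper" uid="1" '
           'version="2" changeset="10" timestamp="2017-01-01T00:00:00Z">'
           '<tag k="addr:city" v="San Francisco"/></node>')
    # Returns {'node': {...node attributes...},
    #          'node_tags': [{'id': '123', 'key': 'city',
    #                         'value': 'San Francisco', 'type': 'addr'}]}
    return shape_element(ET.fromstring(xml))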
| mit |
AICP/external_chromium_org | chrome/common/extensions/docs/server2/path_canonicalizer.py | 16 | 4879 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import posixpath
from future import Future
from path_util import SplitParent
from special_paths import SITE_VERIFICATION_FILE
def _Normalize(file_name, splittext=False):
normalized = file_name
if splittext:
normalized = posixpath.splitext(file_name)[0]
normalized = normalized.replace('.', '').replace('-', '').replace('_', '')
return normalized.lower()
def _CommonNormalizedPrefix(first_file, second_file):
return posixpath.commonprefix((_Normalize(first_file),
_Normalize(second_file)))
class PathCanonicalizer(object):
'''Transforms paths into their canonical forms. Since the docserver has had
many incarnations - e.g. there didn't use to be apps/ - there may be old
paths lying around the webs. We try to redirect those to where they are now.
'''
def __init__(self,
file_system,
object_store_creator,
strip_extensions):
# |strip_extensions| is a list of file extensions (e.g. .html) that should
# be stripped for a path's canonical form.
self._cache = object_store_creator.Create(
PathCanonicalizer, category=file_system.GetIdentity())
self._file_system = file_system
self._strip_extensions = strip_extensions
def _LoadCache(self):
cached_future = self._cache.GetMulti(('canonical_paths',
'simplified_paths_map'))
def resolve():
# |canonical_paths| is the pre-calculated set of canonical paths.
# |simplified_paths_map| is a lazily populated mapping of simplified file
# names to a list of full paths that contain them. For example,
# - browseraction: [extensions/browserAction.html]
# - storage: [apps/storage.html, extensions/storage.html]
cached = cached_future.Get()
canonical_paths, simplified_paths_map = (
cached.get('canonical_paths'), cached.get('simplified_paths_map'))
if canonical_paths is None:
assert simplified_paths_map is None
canonical_paths = set()
simplified_paths_map = defaultdict(list)
for base, dirs, files in self._file_system.Walk(''):
for path in dirs + files:
path_without_ext, ext = posixpath.splitext(path)
canonical_path = posixpath.join(base, path_without_ext)
if (ext not in self._strip_extensions or
path == SITE_VERIFICATION_FILE):
canonical_path += ext
canonical_paths.add(canonical_path)
simplified_paths_map[_Normalize(path, splittext=True)].append(
canonical_path)
# Store |simplified_paths_map| sorted. Ties in length are broken by
# taking the shortest, lexicographically smallest path.
for path_list in simplified_paths_map.itervalues():
path_list.sort(key=lambda p: (len(p), p))
self._cache.SetMulti({
'canonical_paths': canonical_paths,
'simplified_paths_map': simplified_paths_map,
})
else:
assert simplified_paths_map is not None
return canonical_paths, simplified_paths_map
return Future(callback=resolve)
def Canonicalize(self, path):
'''Returns the canonical path for |path|.
'''
canonical_paths, simplified_paths_map = self._LoadCache().Get()
# Path may already be the canonical path.
if path in canonical_paths:
return path
# Path not found. Our single heuristic: find |base| in the directory
# structure with the longest common prefix of |path|.
_, base = SplitParent(path)
# Paths with a non-extension dot separator lose information in
# _Normalize, so we try paths both with and without the dot to
# maximize the possibility of finding the right path.
potential_paths = (
simplified_paths_map.get(_Normalize(base), []) +
simplified_paths_map.get(_Normalize(base, splittext=True), []))
if potential_paths == []:
# There is no file with anything close to that name.
return path
# The most likely canonical file is the one with the longest common prefix
# with |path|. This is slightly weaker than it could be; |path| is
# compared without symbols, not the simplified form of |path|,
# which may matter.
max_prefix = potential_paths[0]
max_prefix_length = len(_CommonNormalizedPrefix(max_prefix, path))
for path_for_file in potential_paths[1:]:
prefix_length = len(_CommonNormalizedPrefix(path_for_file, path))
if prefix_length > max_prefix_length:
max_prefix, max_prefix_length = path_for_file, prefix_length
return max_prefix
def Cron(self):
return self._LoadCache()
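# Illustrative sketch (not part of the original module): the normalization the
# canonicalizer keys on. Dots, dashes, underscores and case are dropped, so an
# old link like 'browserAction.html' maps to the same simplified key as the
# file name of 'extensions/browserAction'.
def _example_normalize():
    assert _Normalize('browserAction.html', splittext=True) == 'browseraction'
    assert _Normalize('browser_action.html') == 'browseractionhtml'
    return _CommonNormalizedPrefix('extensions/browserAction',
                                   'extensions/browseraction.html')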
| bsd-3-clause |
clinton-hall/nzbToMedia | libs/common/subliminal/video.py | 14 | 7852 | # -*- coding: utf-8 -*-
from __future__ import division
from datetime import datetime, timedelta
import logging
import os
from guessit import guessit
logger = logging.getLogger(__name__)
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
'.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo',
'.vob', '.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object):
"""Base class for videos.
Represent a video, existing or not.
:param str name: name or path of the video.
:param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
:param str release_group: release group of the video.
:param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
:param str video_codec: codec of the video stream.
:param str audio_codec: codec of the main audio stream.
:param str imdb_id: IMDb id of the video.
:param dict hashes: hashes of the video file by provider names.
:param int size: size of the video file in bytes.
:param set subtitle_languages: existing subtitle languages.
"""
def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
imdb_id=None, hashes=None, size=None, subtitle_languages=None):
#: Name or path of the video
self.name = name
#: Format of the video (HDTV, WEB-DL, BluRay, ...)
self.format = format
#: Release group of the video
self.release_group = release_group
#: Resolution of the video stream (480p, 720p, 1080p or 1080i)
self.resolution = resolution
#: Codec of the video stream
self.video_codec = video_codec
#: Codec of the main audio stream
self.audio_codec = audio_codec
#: IMDb id of the video
self.imdb_id = imdb_id
#: Hashes of the video file by provider names
self.hashes = hashes or {}
#: Size of the video file in bytes
self.size = size
#: Existing subtitle languages
self.subtitle_languages = subtitle_languages or set()
@property
def exists(self):
"""Test whether the video exists"""
return os.path.exists(self.name)
@property
def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta()
@classmethod
def fromguess(cls, name, guess):
"""Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.
:param str name: name of the video.
:param dict guess: guessed data.
:raise: :class:`ValueError` if the `type` of the `guess` is invalid
"""
if guess['type'] == 'episode':
return Episode.fromguess(name, guess)
if guess['type'] == 'movie':
return Movie.fromguess(name, guess)
raise ValueError('The guess must be an episode or a movie guess')
@classmethod
def fromname(cls, name):
"""Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.
:param str name: name of the video.
"""
return cls.fromguess(name, guessit(name))
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
def __hash__(self):
return hash(self.name)
class Episode(Video):
"""Episode :class:`Video`.
:param str series: series of the episode.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:param str title: title of the episode.
:param int year: year of the series.
:param bool original_series: whether the series is the first with this name.
:param int tvdb_id: TVDB id of the episode.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, series, season, episode, title=None, year=None, original_series=True, tvdb_id=None,
series_tvdb_id=None, series_imdb_id=None, **kwargs):
super(Episode, self).__init__(name, **kwargs)
#: Series of the episode
self.series = series
#: Season number of the episode
self.season = season
#: Episode number of the episode
self.episode = episode
#: Title of the episode
self.title = title
#: Year of series
self.year = year
#: The series is the first with this name
self.original_series = original_series
#: TVDB id of the episode
self.tvdb_id = tvdb_id
#: TVDB id of the series
self.series_tvdb_id = series_tvdb_id
#: IMDb id of the series
self.series_imdb_id = series_imdb_id
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'episode':
raise ValueError('The guess must be an episode guess')
if 'title' not in guess or 'episode' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], guess.get('season', 1), guess['episode'], title=guess.get('episode_title'),
year=guess.get('year'), format=guess.get('format'), original_series='year' not in guess,
release_group=guess.get('release_group'), resolution=guess.get('screen_size'),
video_codec=guess.get('video_codec'), audio_codec=guess.get('audio_codec'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'episode'}))
def __repr__(self):
if self.year is None:
return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode)
return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode)
class Movie(Video):
"""Movie :class:`Video`.
:param str title: title of the movie.
:param int year: year of the movie.
:param \*\*kwargs: additional parameters for the :class:`Video` constructor.
"""
def __init__(self, name, title, year=None, **kwargs):
super(Movie, self).__init__(name, **kwargs)
#: Title of the movie
self.title = title
#: Year of the movie
self.year = year
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'movie':
raise ValueError('The guess must be a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('release_group'),
resolution=guess.get('screen_size'), video_codec=guess.get('video_codec'),
audio_codec=guess.get('audio_codec'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guessit(name, {'type': 'movie'}))
def __repr__(self):
if self.year is None:
return '<%s [%r]>' % (self.__class__.__name__, self.title)
return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
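# Illustrative sketch (not part of the original module): fromname() guesses the
# video type from a release name and builds an Episode or a Movie accordingly.
# The file names below are invented examples.
def _example_fromname():
    episode = Video.fromname('The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4')
    movie = Video.fromname('Man.of.Steel.2013.720p.BluRay.x264-Felony.mkv')
    return episode, movie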
| gpl-3.0 |
thingsinjars/electron | tools/js2asar.py | 7 | 1416 | #!/usr/bin/env python
import errno
import os
import shutil
import subprocess
import sys
import tempfile
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
def main():
archive = sys.argv[1]
js_source_files = sys.argv[2:]
output_dir = tempfile.mkdtemp()
copy_js(js_source_files, output_dir)
call_asar(archive, output_dir)
shutil.rmtree(output_dir)
def copy_js(js_source_files, output_dir):
for source_file in js_source_files:
output_filename = os.path.splitext(source_file)[0] + '.js'
output_path = os.path.join(output_dir, output_filename)
safe_mkdir(os.path.dirname(output_path))
shutil.copy2(source_file, output_path)
def call_asar(archive, output_dir):
js_dir = os.path.join(output_dir, 'atom')
asar = os.path.join(SOURCE_ROOT, 'node_modules', 'asar', 'bin', 'asar')
subprocess.check_call([find_node(), asar, 'pack', js_dir, archive])
def find_node():
WINDOWS_NODE_PATHs = [
'C:/Program Files (x86)/nodejs',
'C:/Program Files/nodejs',
] + os.environ['PATH'].split(os.pathsep)
if sys.platform in ['win32', 'cygwin']:
for path in WINDOWS_NODE_PATHs:
full_path = os.path.join(path, 'node.exe')
if os.path.exists(full_path):
return full_path
return 'node'
def safe_mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if __name__ == '__main__':
sys.exit(main())
| mit |
alaski/nova | nova/scheduler/scheduler_options.py | 15 | 3340 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SchedulerOptions monitors a local .json file for changes and loads
it if needed. This file is converted to a data structure and passed
into the filtering and weighing functions which can use it for
dynamic configuration.
"""
import datetime
import os
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import timeutils
import nova.conf
from nova.i18n import _LE
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class SchedulerOptions(object):
"""SchedulerOptions monitors a local .json file for changes and loads it
if needed. This file is converted to a data structure and passed into
the filtering and weighing functions which can use it for dynamic
configuration.
"""
def __init__(self):
super(SchedulerOptions, self).__init__()
self.data = {}
self.last_modified = None
self.last_checked = None
def _get_file_handle(self, filename):
"""Get file handle. Broken out for testing."""
return open(filename)
def _get_file_timestamp(self, filename):
"""Get the last modified datetime. Broken out for testing."""
try:
return os.path.getmtime(filename)
except os.error:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Could not stat scheduler options file "
"%(filename)s"),
{'filename': filename})
def _load_file(self, handle):
"""Decode the JSON file. Broken out for testing."""
try:
return jsonutils.load(handle)
except ValueError:
LOG.exception(_LE("Could not decode scheduler options"))
return {}
def _get_time_now(self):
"""Get current UTC. Broken out for testing."""
return timeutils.utcnow()
def get_configuration(self, filename=None):
"""Check the json file for changes and load it if needed."""
if not filename:
filename = CONF.scheduler_json_config_location
if not filename:
return self.data
if self.last_checked:
now = self._get_time_now()
if now - self.last_checked < datetime.timedelta(minutes=5):
return self.data
last_modified = self._get_file_timestamp(filename)
if (not last_modified or not self.last_modified or
last_modified > self.last_modified):
self.data = self._load_file(self._get_file_handle(filename))
self.last_modified = last_modified
if not self.data:
self.data = {}
return self.data
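# Illustrative sketch (not part of the original module): get_configuration()
# simply returns the decoded JSON, so the file layout is whatever the filters
# and weighing functions expect. Both the path and the keys below are
# hypothetical.
#
# /etc/nova/scheduler.json:
# {"cpu_allocation_ratio": 16.0, "max_instances_per_host": 50}
def _example_read_options():
    options = SchedulerOptions()
    return options.get_configuration('/etc/nova/scheduler.json')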
| apache-2.0 |
mecamiratries/printrundensity | printrun/gui/bufferedcanvas.py | 21 | 3286 | """
BufferedCanvas -- flicker-free canvas widget
Copyright (C) 2005, 2006 Daniel Keep, 2011 Duane Johnson
To use this widget, just override or replace the draw method.
This will be called whenever the widget size changes, or when
the update method is explicitly called.
Please submit any improvements/bugfixes/ideas to the following
url:
http://wiki.wxpython.org/index.cgi/BufferedCanvas
2006-04-29: Added bugfix for a crash on Mac provided by Marc Jans.
"""
# Hint: try removing '.sp4msux0rz'
__author__ = 'Daniel Keep <[email protected]>'
__license__ = """
This file is part of the Printrun suite.
Printrun is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Printrun is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Printrun. If not, see <http://www.gnu.org/licenses/>.
"""
__all__ = ['BufferedCanvas']
import wx
class BufferedCanvas(wx.Panel):
"""
Implements a flicker-free canvas widget.
Standard usage is to subclass this class, and override the
draw method. The draw method is passed a device context, which
should be used to do your drawing.
If you want to force a redraw (for whatever reason), you should
call the update method. This is because the draw method is never
called as a result of an EVT_PAINT event.
"""
# These are our two buffers. Just be aware that when the buffers
# are flipped, the REFERENCES are swapped. So I wouldn't want to
# try holding onto explicit references to one or the other ;)
buffer = None
backbuffer = None
def __init__(self,
parent,
ID=-1,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style = wx.NO_FULL_REPAINT_ON_RESIZE | wx.WANTS_CHARS):
wx.Panel.__init__(self, parent, ID, pos, size, style)
# Bind events
self.Bind(wx.EVT_PAINT, self.onPaint)
# Disable background erasing (flicker-licious)
def disable_event(*pargs, **kwargs):
pass # the sauce, please
self.Bind(wx.EVT_ERASE_BACKGROUND, disable_event)
#
# General methods
#
def draw(self, dc, w, h):
"""
Stub: called when the canvas needs to be re-drawn.
"""
pass
def update(self):
"""
Causes the canvas to be updated.
"""
self.Refresh()
def getWidthHeight(self):
width, height = self.GetClientSizeTuple()
if width == 0:
width = 1
if height == 0:
height = 1
return (width, height)
#
# Event handlers
#
def onPaint(self, event):
# Blit the front buffer to the screen
w, h = self.GetClientSizeTuple()
if not w or not h:
return
else:
dc = wx.BufferedPaintDC(self)
self.draw(dc, w, h)
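# Illustrative sketch (not part of the original file): the standard usage
# described above -- subclass BufferedCanvas and override draw(). The drawing
# itself is arbitrary example content.
class _ExampleCanvas(BufferedCanvas):
    def draw(self, dc, w, h):
        # Fill the buffer, then draw a diagonal across the client area.
        dc.SetBackground(wx.Brush('white'))
        dc.Clear()
        dc.DrawLine(0, 0, w, h)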
| gpl-3.0 |
sergio-incaser/bank-payment | __unported__/account_banking/wizard/banktools.py | 13 | 11967 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.addons.account_banking import sepa
from openerp.addons.account_banking.struct import struct
__all__ = [
'get_period',
'get_bank_accounts',
'get_partner',
'get_country_id',
'get_company_bank_account',
'create_bank_account',
]
def get_period(pool, cr, uid, date, company, log=None):
'''
Wrapper over account_period.find() to log exceptions of
missing periods instead of raising.
'''
context = {'account_period_prefer_normal': True}
if company:
context['company_id'] = company.id
try:
period_ids = pool.get('account.period').find(
cr, uid, dt=date, context=context)
except Exception as e:
if log is None:
raise
else:
log.append(e)
return False
return period_ids[0]
def get_bank_accounts(pool, cr, uid, account_number, log, fail=False):
'''
Get the bank account with account number account_number
'''
# No need to search for nothing
if not account_number:
return []
partner_bank_obj = pool.get('res.partner.bank')
bank_account_ids = partner_bank_obj.search(cr, uid, [
('acc_number', '=', account_number)
])
if not bank_account_ids:
if not fail:
log.append(
_('Bank account %(account_no)s was not found in the database')
% dict(account_no=account_number)
)
return []
return partner_bank_obj.browse(cr, uid, bank_account_ids)
def _has_attr(obj, attr):
# Needed for dangling addresses and a weird exception scheme in
# OpenERP's orm.
try:
return bool(getattr(obj, attr))
except KeyError:
return False
def get_partner(pool, cr, uid, name, address, postal_code, city,
country_id, log, context=None):
'''
Get the partner belonging to the account holders name <name>
If multiple partners are found with the same name, select the first and
add a warning to the import log.
TODO: revive the search by lines from the address argument
'''
partner_obj = pool.get('res.partner')
partner_ids = partner_obj.search(
cr, uid, [
'|', ('is_company', '=', True), ('parent_id', '=', False),
('name', 'ilike', name),
], context=context)
if not partner_ids:
# Try brute search on address and then match reverse
criteria = []
if country_id:
criteria.append(('country_id', '=', country_id))
if city:
criteria.append(('city', 'ilike', city))
if postal_code:
criteria.append(('zip', 'ilike', postal_code))
partner_search_ids = partner_obj.search(
cr, uid, criteria, context=context)
if (not partner_search_ids and country_id):
# Try again with country_id = False
criteria[0] = ('country_id', '=', False)
partner_search_ids = partner_obj.search(
cr, uid, criteria, context=context)
key = name.lower()
partners = []
for partner in partner_obj.read(
cr, uid, partner_search_ids, ['name', 'commercial_partner_id'],
context=context):
if (len(partner['name']) > 3 and partner['name'].lower() in key):
partners.append(partner)
partners.sort(key=lambda x: len(x['name']), reverse=True)
partner_ids = [x['commercial_partner_id'][0] for x in partners]
if len(partner_ids) > 1:
log.append(
_('More than one possible match found for partner with '
'name %(name)s') % {'name': name})
return partner_ids and partner_ids[0] or False
def get_company_bank_account(pool, cr, uid, account_number, currency,
company, log):
'''
Get the matching bank account for this company. Currency is the ISO code
for the requested currency.
'''
results = struct()
bank_accounts = get_bank_accounts(pool, cr, uid, account_number, log,
fail=True)
if not bank_accounts:
return False
elif len(bank_accounts) != 1:
log.append(
_('More than one bank account was found with the same number '
'%(account_no)s') % dict(account_no=account_number)
)
return False
if bank_accounts[0].partner_id.id != company.partner_id.id:
log.append(
_('Account %(account_no)s is not owned by %(partner)s')
% dict(account_no=account_number,
partner=company.partner_id.name,
))
return False
results.account = bank_accounts[0]
bank_settings_obj = pool.get('account.banking.account.settings')
criteria = [('partner_bank_id', '=', bank_accounts[0].id)]
# Find matching journal for currency
journal_obj = pool.get('account.journal')
journal_ids = journal_obj.search(cr, uid, [
('type', '=', 'bank'),
('currency.name', '=', currency or company.currency_id.name)
])
if currency == company.currency_id.name:
journal_ids_no_curr = journal_obj.search(cr, uid, [
('type', '=', 'bank'), ('currency', '=', False)
])
journal_ids.extend(journal_ids_no_curr)
if journal_ids:
criteria.append(('journal_id', 'in', journal_ids))
# Find bank account settings
bank_settings_ids = bank_settings_obj.search(cr, uid, criteria)
if bank_settings_ids:
settings = bank_settings_obj.browse(cr, uid, bank_settings_ids)[0]
results.company_id = company
results.journal_id = settings.journal_id
# Take currency from settings or from company
if settings.journal_id.currency.id:
results.currency_id = settings.journal_id.currency
else:
results.currency_id = company.currency_id
# Rest just copy/paste from settings. Why am I doing this?
results.default_debit_account_id = settings.default_debit_account_id
results.default_credit_account_id = settings.default_credit_account_id
results.costs_account_id = settings.costs_account_id
results.invoice_journal_id = settings.invoice_journal_id
results.bank_partner_id = settings.bank_partner_id
return results
def get_or_create_bank(pool, cr, uid, bic, online=False, code=None,
name=None, context=None):
'''
Find or create the bank with the provided BIC code.
When online, the SWIFT database will be consulted in order to
provide for missing information.
'''
# UPDATE: Free SWIFT databases are since 2/22/2010 hidden behind an
# image challenge/response interface.
bank_obj = pool.get('res.bank')
# Self generated key?
if len(bic) < 8:
# search key
bank_ids = bank_obj.search(
cr, uid, [
('bic', '=', bic[:6])
])
if not bank_ids:
bank_ids = bank_obj.search(
cr, uid, [
('bic', 'ilike', bic + '%')
])
else:
bank_ids = bank_obj.search(
cr, uid, [
('bic', '=', bic)
])
if bank_ids and len(bank_ids) == 1:
banks = bank_obj.browse(cr, uid, bank_ids)
return banks[0].id, banks[0].country.id
country_obj = pool.get('res.country')
country_ids = country_obj.search(
cr, uid, [('code', '=', bic[4:6])]
)
country_id = country_ids and country_ids[0] or False
bank_id = False
if online:
info, address = bank_obj.online_bank_info(
cr, uid, bic, context=context
)
if info:
bank_id = bank_obj.create(cr, uid, dict(
code=info.code,
name=info.name,
street=address.street,
street2=address.street2,
zip=address.zip,
city=address.city,
country=country_id,
bic=info.bic[:8],
))
else:
info = struct(name=name, code=code)
if not online or not bank_id:
bank_id = bank_obj.create(cr, uid, dict(
code=info.code or 'UNKNOWN',
name=info.name or _('Unknown Bank'),
country=country_id,
bic=bic,
))
return bank_id, country_id
def get_country_id(pool, cr, uid, transaction, context=None):
"""
Derive a country id from the info on the transaction.
:param transaction: browse record of a transaction
:returns: res.country id or False
"""
country_code = False
iban = sepa.IBAN(transaction.remote_account)
if iban.valid:
country_code = iban.countrycode
elif transaction.remote_owner_country_code:
country_code = transaction.remote_owner_country_code
# fallback on the import parsers country code
elif transaction.bank_country_code:
country_code = transaction.bank_country_code
if country_code:
country_ids = pool.get('res.country').search(
cr, uid, [('code', '=', country_code.upper())],
context=context)
country_id = country_ids and country_ids[0] or False
if not country_id:
company = transaction.statement_line_id.company_id
if company.partner_id.country:
country_id = company.partner_id.country.id
return country_id
def create_bank_account(pool, cr, uid, partner_id,
account_number, holder_name, address, city,
country_id, bic=False,
context=None):
'''
Create a matching bank account with this holder for this partner.
'''
values = struct(
partner_id=partner_id,
owner_name=holder_name,
country_id=country_id,
)
# Are we dealing with IBAN?
iban = sepa.IBAN(account_number)
if iban.valid:
# Take as much info as possible from IBAN
values.state = 'iban'
values.acc_number = str(iban)
else:
# No, try to convert to IBAN
values.state = 'bank'
values.acc_number = account_number
if country_id:
country_code = pool.get('res.country').read(
cr, uid, country_id, ['code'], context=context)['code']
if country_code in sepa.IBAN.countries:
account_info = pool['res.partner.bank'].online_account_info(
cr, uid, country_code, values.acc_number, context=context)
if account_info:
values.acc_number = iban = account_info.iban
values.state = 'iban'
bic = account_info.bic
if bic:
values.bank = get_or_create_bank(pool, cr, uid, bic)[0]
values.bank_bic = bic
# Create bank account and return
return pool.get('res.partner.bank').create(
cr, uid, values, context=context)
| agpl-3.0 |
o5k/openerp-oemedical-v0.1 | openerp/addons/auth_openid/__init__.py | 443 | 1090 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_users
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
coderbone/SickRage | lib/subliminal/video.py | 9 | 19384 | # -*- coding: utf-8 -*-
from __future__ import division
from datetime import datetime, timedelta
import hashlib
import logging
import os
import struct
from babelfish import Error as BabelfishError, Language
from enzyme import Error as EnzymeError, MKV
from guessit import guess_episode_info, guess_file_info, guess_movie_info
logger = logging.getLogger(__name__)
#: Video extensions
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4',
'.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.omf', '.ps',
'.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo', '.vob',
'.vro', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
#: Subtitle extensions
SUBTITLE_EXTENSIONS = ('.srt', '.sub', '.smi', '.txt', '.ssa', '.ass', '.mpl')
class Video(object):
"""Base class for videos.
Represent a video, existing or not. Attributes have an associated score based on equations defined in
:mod:`~subliminal.score`.
:param str name: name or path of the video.
:param str format: format of the video (HDTV, WEB-DL, BluRay, ...).
:param str release_group: release group of the video.
:param str resolution: resolution of the video stream (480p, 720p, 1080p or 1080i).
:param str video_codec: codec of the video stream.
:param str audio_codec: codec of the main audio stream.
:param int imdb_id: IMDb id of the video.
:param dict hashes: hashes of the video file by provider names.
:param int size: size of the video file in bytes.
:param set subtitle_languages: existing subtitle languages
"""
#: Score by match property
scores = {}
def __init__(self, name, format=None, release_group=None, resolution=None, video_codec=None, audio_codec=None,
imdb_id=None, hashes=None, size=None, subtitle_languages=None):
#: Name or path of the video
self.name = name
#: Format of the video (HDTV, WEB-DL, BluRay, ...)
self.format = format
#: Release group of the video
self.release_group = release_group
#: Resolution of the video stream (480p, 720p, 1080p or 1080i)
self.resolution = resolution
#: Codec of the video stream
self.video_codec = video_codec
#: Codec of the main audio stream
self.audio_codec = audio_codec
#: IMDb id of the video
self.imdb_id = imdb_id
#: Hashes of the video file by provider names
self.hashes = hashes or {}
#: Size of the video file in bytes
self.size = size
#: Existing subtitle languages
self.subtitle_languages = subtitle_languages or set()
@property
def exists(self):
"""Test whether the video exists."""
return os.path.exists(self.name)
@property
def age(self):
"""Age of the video."""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta()
@classmethod
def fromguess(cls, name, guess):
"""Create an :class:`Episode` or a :class:`Movie` with the given `name` based on the `guess`.
:param str name: name of the video.
:param dict guess: guessed data, like a :class:`~guessit.guess.Guess` instance.
:raise: :class:`ValueError` if the `type` of the `guess` is invalid
"""
if guess['type'] == 'episode':
return Episode.fromguess(name, guess)
if guess['type'] == 'movie':
return Movie.fromguess(name, guess)
raise ValueError('The guess must be an episode or a movie guess')
@classmethod
def fromname(cls, name):
"""Shortcut for :meth:`fromguess` with a `guess` guessed from the `name`.
:param str name: name of the video.
"""
return cls.fromguess(name, guess_file_info(name))
def __repr__(self):
return '<%s [%r]>' % (self.__class__.__name__, self.name)
def __hash__(self):
return hash(self.name)
class Episode(Video):
"""Episode :class:`Video`.
Scores are defined by a set of equations, see :func:`~subliminal.score.solve_episode_equations`
:param str series: series of the episode.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:param str title: title of the episode.
:param int year: year of series.
:param int tvdb_id: TVDB id of the episode
"""
#: Score by match property
scores = {'hash': 137, 'imdb_id': 110, 'tvdb_id': 88, 'series': 44, 'year': 44, 'title': 22, 'season': 11,
'episode': 11, 'release_group': 11, 'format': 6, 'video_codec': 4, 'resolution': 4, 'audio_codec': 2,
'hearing_impaired': 1}
def __init__(self, name, series, season, episode, format=None, release_group=None, resolution=None,
video_codec=None, audio_codec=None, imdb_id=None, hashes=None, size=None, subtitle_languages=None,
title=None, year=None, tvdb_id=None):
super(Episode, self).__init__(name, format, release_group, resolution, video_codec, audio_codec, imdb_id,
hashes, size, subtitle_languages)
#: Series of the episode
self.series = series
#: Season number of the episode
self.season = season
#: Episode number of the episode
self.episode = episode
#: Title of the episode
self.title = title
#: Year of series
self.year = year
#: TVDB id of the episode
self.tvdb_id = tvdb_id
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'episode':
raise ValueError('The guess must be an episode guess')
if 'series' not in guess or 'season' not in guess or 'episodeNumber' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['series'], guess['season'], guess['episodeNumber'], format=guess.get('format'),
release_group=guess.get('releaseGroup'), resolution=guess.get('screenSize'),
video_codec=guess.get('videoCodec'), audio_codec=guess.get('audioCodec'),
title=guess.get('title'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guess_episode_info(name))
def __repr__(self):
if self.year is None:
return '<%s [%r, %dx%d]>' % (self.__class__.__name__, self.series, self.season, self.episode)
return '<%s [%r, %d, %dx%d]>' % (self.__class__.__name__, self.series, self.year, self.season, self.episode)
class Movie(Video):
"""Movie :class:`Video`.
Scores are defined by a set of equations, see :func:`~subliminal.score.solve_movie_equations`
:param str title: title of the movie.
:param int year: year of the movie
"""
#: Score by match property
scores = {'hash': 62, 'imdb_id': 62, 'title': 23, 'year': 12, 'release_group': 11, 'format': 6, 'video_codec': 4,
'resolution': 4, 'audio_codec': 2, 'hearing_impaired': 1}
def __init__(self, name, title, format=None, release_group=None, resolution=None, video_codec=None,
audio_codec=None, imdb_id=None, hashes=None, size=None, subtitle_languages=None, year=None):
super(Movie, self).__init__(name, format, release_group, resolution, video_codec, audio_codec, imdb_id, hashes,
size, subtitle_languages)
#: Title of the movie
self.title = title
#: Year of the movie
self.year = year
@classmethod
def fromguess(cls, name, guess):
if guess['type'] != 'movie':
raise ValueError('The guess must be a movie guess')
if 'title' not in guess:
raise ValueError('Insufficient data to process the guess')
return cls(name, guess['title'], format=guess.get('format'), release_group=guess.get('releaseGroup'),
resolution=guess.get('screenSize'), video_codec=guess.get('videoCodec'),
audio_codec=guess.get('audioCodec'), year=guess.get('year'))
@classmethod
def fromname(cls, name):
return cls.fromguess(name, guess_movie_info(name))
def __repr__(self):
if self.year is None:
return '<%s [%r]>' % (self.__class__.__name__, self.title)
return '<%s [%r, %d]>' % (self.__class__.__name__, self.title, self.year)
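# Illustrative sketch (not part of the original module): Video.fromguess
# dispatches on the guessed 'type' to build an Episode or a Movie. The guess
# dict below is hand-built; normally it would come from guessit.
def _example_video_fromguess():
    guess = {'type': 'episode', 'series': 'Example Show', 'season': 1, 'episodeNumber': 2}
    video = Video.fromguess('Example.Show.S01E02.720p.HDTV.x264-GRP.mkv', guess)
    # video is an Episode instance, e.g. <Episode ['Example Show', 1x2]>
    return video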
def search_external_subtitles(path, directory=None):
"""Search for external subtitles from a video `path` and their associated language.
Unless `directory` is provided, search will be made in the same directory as the video file.
:param str path: path to the video.
:param str directory: directory to search for subtitles.
:return: found subtitles with their languages.
:rtype: dict
"""
dirpath, filename = os.path.split(path)
dirpath = dirpath or '.'
fileroot, fileext = os.path.splitext(filename)
subtitles = {}
for p in os.listdir(directory or dirpath):
# keep only valid subtitle filenames
if not p.startswith(fileroot) or not p.endswith(SUBTITLE_EXTENSIONS):
continue
# extract the potential language code
language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:]
# default language is undefined
language = Language('und')
# attempt to parse
if language_code:
try:
language = Language.fromietf(language_code)
except ValueError:
logger.error('Cannot parse language code %r', language_code)
subtitles[p] = language
logger.debug('Found subtitles %r', subtitles)
return subtitles
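# Illustrative sketch (not part of the original module): external subtitles are
# matched on the video's file root plus an optional IETF language suffix, so a
# 'movie.en.srt' next to 'movie.mkv' is reported as English and a bare
# 'movie.srt' as an undefined language.
def _example_search_external_subtitles():
    import tempfile
    dirpath = tempfile.mkdtemp()
    video_path = os.path.join(dirpath, 'movie.mkv')
    open(video_path, 'w').close()
    open(os.path.join(dirpath, 'movie.en.srt'), 'w').close()
    open(os.path.join(dirpath, 'movie.srt'), 'w').close()
    # expected: {'movie.en.srt': <Language [en]>, 'movie.srt': <Language [und]>}
    return search_external_subtitles(video_path)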
def scan_video(path, subtitles=True, embedded_subtitles=True, subtitles_dir=None):
"""Scan a video and its subtitle languages from a video `path`.
:param str path: existing path to the video.
:param bool subtitles: scan for subtitles with the same name.
:param bool embedded_subtitles: scan for embedded subtitles.
:param str subtitles_dir: directory to search for subtitles.
:return: the scanned video.
:rtype: :class:`Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check video extension
if not path.endswith(VIDEO_EXTENSIONS):
raise ValueError('%s is not a valid video extension' % os.path.splitext(path)[1])
dirpath, filename = os.path.split(path)
logger.info('Scanning video %r in %r', filename, dirpath)
# guess
video = Video.fromguess(path, guess_file_info(path))
# size and hashes
video.size = os.path.getsize(path)
if video.size > 10485760:
logger.debug('Size is %d', video.size)
video.hashes['opensubtitles'] = hash_opensubtitles(path)
video.hashes['thesubdb'] = hash_thesubdb(path)
video.hashes['napiprojekt'] = hash_napiprojekt(path)
logger.debug('Computed hashes %r', video.hashes)
else:
logger.warning('Size is lower than 10MB: hashes not computed')
# external subtitles
if subtitles:
video.subtitle_languages |= set(search_external_subtitles(path, directory=subtitles_dir).values())
# video metadata with enzyme
try:
if filename.endswith('.mkv'):
with open(path, 'rb') as f:
mkv = MKV(f)
# main video track
if mkv.video_tracks:
video_track = mkv.video_tracks[0]
# resolution
if video_track.height in (480, 720, 1080):
if video_track.interlaced:
video.resolution = '%di' % video_track.height
else:
video.resolution = '%dp' % video_track.height
logger.debug('Found resolution %s with enzyme', video.resolution)
# video codec
if video_track.codec_id == 'V_MPEG4/ISO/AVC':
video.video_codec = 'h264'
logger.debug('Found video_codec %s with enzyme', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/SP':
video.video_codec = 'DivX'
logger.debug('Found video_codec %s with enzyme', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/ASP':
video.video_codec = 'XviD'
logger.debug('Found video_codec %s with enzyme', video.video_codec)
else:
logger.warning('MKV has no video track')
# main audio track
if mkv.audio_tracks:
audio_track = mkv.audio_tracks[0]
# audio codec
if audio_track.codec_id == 'A_AC3':
video.audio_codec = 'AC3'
logger.debug('Found audio_codec %s with enzyme', video.audio_codec)
elif audio_track.codec_id == 'A_DTS':
video.audio_codec = 'DTS'
logger.debug('Found audio_codec %s with enzyme', video.audio_codec)
elif audio_track.codec_id == 'A_AAC':
video.audio_codec = 'AAC'
logger.debug('Found audio_codec %s with enzyme', video.audio_codec)
else:
logger.warning('MKV has no audio track')
# subtitle tracks
if mkv.subtitle_tracks:
if embedded_subtitles:
embedded_subtitle_languages = set()
for st in mkv.subtitle_tracks:
if st.language:
try:
embedded_subtitle_languages.add(Language.fromalpha3b(st.language))
except BabelfishError:
logger.error('Embedded subtitle track language %r is not a valid language', st.language)
embedded_subtitle_languages.add(Language('und'))
elif st.name:
try:
embedded_subtitle_languages.add(Language.fromname(st.name))
except BabelfishError:
logger.debug('Embedded subtitle track name %r is not a valid language', st.name)
embedded_subtitle_languages.add(Language('und'))
else:
embedded_subtitle_languages.add(Language('und'))
logger.debug('Found embedded subtitle %r with enzyme', embedded_subtitle_languages)
video.subtitle_languages |= embedded_subtitle_languages
else:
logger.debug('MKV has no subtitle track')
except EnzymeError:
logger.exception('Parsing video metadata with enzyme failed')
return video
def scan_videos(path, subtitles=True, embedded_subtitles=True, subtitles_dir=None):
"""Scan `path` for videos and their subtitles.
:param str path: existing directory path to scan.
:param bool subtitles: scan for subtitles with the same name.
:param bool embedded_subtitles: scan for embedded subtitles.
:param str subtitles_dir: directory to search for subtitles.
:return: the scanned videos.
:rtype: list of :class:`Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check for non-directory path
if not os.path.isdir(path):
raise ValueError('Path is not a directory')
# walk the path
videos = []
for dirpath, dirnames, filenames in os.walk(path):
logger.debug('Walking directory %s', dirpath)
# remove badly encoded and hidden dirnames
for dirname in list(dirnames):
if dirname.startswith('.'):
logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# scan for videos
for filename in filenames:
# filter on videos
if not filename.endswith(VIDEO_EXTENSIONS):
continue
# skip hidden files
if filename.startswith('.'):
logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
continue
# reconstruct the file path
filepath = os.path.join(dirpath, filename)
# skip links
if os.path.islink(filepath):
logger.debug('Skipping link %r in %r', filename, dirpath)
continue
# scan video
try:
video = scan_video(filepath, subtitles=subtitles, embedded_subtitles=embedded_subtitles,
subtitles_dir=subtitles_dir)
except ValueError: # pragma: no cover
logger.exception('Error scanning video')
continue
videos.append(video)
return videos
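# Illustrative sketch (not part of the original module): scan_videos only keeps
# non-hidden files with a recognized video extension, so a stray text file and
# a hidden video are both skipped and the result here is an empty list.
def _example_scan_videos():
    import tempfile
    path = tempfile.mkdtemp()
    open(os.path.join(path, 'notes.txt'), 'w').close()
    open(os.path.join(path, '.hidden.mkv'), 'w').close()
    return scan_videos(path)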
def hash_opensubtitles(video_path):
"""Compute a hash using OpenSubtitles' algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
bytesize = struct.calcsize(b'<q')
with open(video_path, 'rb') as f:
filesize = os.path.getsize(video_path)
filehash = filesize
if filesize < 65536 * 2:
return
for _ in range(65536 // bytesize):
filebuffer = f.read(bytesize)
(l_value,) = struct.unpack(b'<q', filebuffer)
filehash += l_value
filehash &= 0xFFFFFFFFFFFFFFFF # to remain as 64bit number
f.seek(max(0, filesize - 65536), 0)
for _ in range(65536 // bytesize):
filebuffer = f.read(bytesize)
(l_value,) = struct.unpack(b'<q', filebuffer)
filehash += l_value
filehash &= 0xFFFFFFFFFFFFFFFF
returnedhash = '%016x' % filehash
return returnedhash
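# Illustrative sketch (not part of the original module): the OpenSubtitles hash
# is the file size plus the 64-bit little-endian words of the first and last
# 64 KiB, truncated to 64 bits and rendered as 16 hex digits. With all-zero
# content the hash therefore reduces to the zero-padded file size.
def _example_hash_opensubtitles():
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'\x00' * 65536 * 2)  # smallest size for which the hash is defined
        path = f.name
    return hash_opensubtitles(path)  # -> '0000000000020000'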
def hash_thesubdb(video_path):
"""Compute a hash using TheSubDB's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 64 * 1024
if os.path.getsize(video_path) < readsize:
return
with open(video_path, 'rb') as f:
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest()
def hash_napiprojekt(video_path):
"""Compute a hash using NapiProjekt's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 1024 * 1024 * 10
with open(video_path, 'rb') as f:
data = f.read(readsize)
return hashlib.md5(data).hexdigest()
| gpl-3.0 |
dhananjay92/servo | tests/wpt/web-platform-tests/tools/webdriver/webdriver/wait.py | 263 | 3563 | # Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Waiting functionality."""
import time
from exceptions import NoSuchElementException
from exceptions import TimeoutException
POLL_FREQUENCY = 0.5 # How long to sleep in between calls to the method
IGNORED_EXCEPTIONS = [NoSuchElementException] # list of exceptions ignored during calls to the method
class WebDriverWait(object):
def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
"""Constructor, takes a WebDriver instance and timeout in seconds.
:Args:
- driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
- timeout - Number of seconds before timing out
- poll_frequency - sleep interval between calls
By default, it is 0.5 second.
- ignored_exceptions - iterable structure of exception classes ignored during calls.
By default, it contains NoSuchElementException only.
Example:
from selenium.webdriver.support.ui import WebDriverWait \n
element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
until_not(lambda x: x.find_element_by_id("someId").is_displayed())
"""
self._driver = driver
self._timeout = timeout
self._poll = poll_frequency
# avoid the divide by zero
if self._poll == 0:
self._poll = POLL_FREQUENCY
exceptions = IGNORED_EXCEPTIONS
if ignored_exceptions is not None:
try:
exceptions.extend(iter(ignored_exceptions))
except TypeError: # ignored_exceptions is not iterable
exceptions.append(ignored_exceptions)
self._ignored_exceptions = tuple(exceptions)
def until(self, method, message=''):
"""Calls the method provided with the driver as an argument until the \
return value is not False."""
end_time = time.time() + self._timeout
while(True):
try:
value = method(self._driver)
if value:
return value
except self._ignored_exceptions:
pass
time.sleep(self._poll)
if(time.time() > end_time):
break
raise TimeoutException(message)
def until_not(self, method, message=''):
"""Calls the method provided with the driver as an argument until the \
return value is False."""
end_time = time.time() + self._timeout
while(True):
try:
value = method(self._driver)
if not value:
return value
except self._ignored_exceptions:
return True
time.sleep(self._poll)
if(time.time() > end_time):
break
raise TimeoutException(message)
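# Illustrative sketch (not part of the original module): until() just polls the
# supplied callable with the driver until it returns a truthy value or the
# timeout expires, so any object can stand in for the driver.
def _example_webdriver_wait():
    class FakeDriver(object):
        def __init__(self):
            self.polls = 0
        def ready(self):
            self.polls += 1
            return self.polls >= 3
    driver = FakeDriver()
    # succeeds after roughly three polls (about a second at the default rate)
    return WebDriverWait(driver, timeout=5).until(lambda d: d.ready())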
| mpl-2.0 |
shabab12/edx-platform | common/lib/xmodule/xmodule/tests/test_conditional.py | 131 | 12910 | import json
import unittest
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.error_module import NonStaffErrorDescriptor
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, CourseLocationManager
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.tests import DATA_DIR, get_test_system, get_test_descriptor_system
from xmodule.x_module import STUDENT_VIEW
ORG = 'test_org'
COURSE = 'conditional' # name of directory with course data
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", source_dirs=[], load_error_modules=load_error_modules)
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=SlashSeparatedCourseKey(ORG, COURSE, 'test_run'),
course_dir='test_dir',
error_tracker=Mock(),
load_error_modules=load_error_modules,
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
A helper class to create a conditional module and associated source and child modules
to allow for testing.
"""
@staticmethod
def create(system, source_is_error_module=False):
"""
return a dict of modules: the conditional with a single source and a single child.
Keys are 'cond_module', 'source_module', and 'child_module'.
if the source_is_error_module flag is set, create a real ErrorModule for the source.
"""
descriptor_system = get_test_descriptor_system()
# construct source descriptor and module:
source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
if source_is_error_module:
# Make an error descriptor and module
source_descriptor = NonStaffErrorDescriptor.from_xml(
'some random xml data',
system,
id_generator=CourseLocationManager(source_location.course_key),
error_msg='random error message'
)
else:
source_descriptor = Mock(name='source_descriptor')
source_descriptor.location = source_location
source_descriptor.runtime = descriptor_system
source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)
# construct other descriptors:
child_descriptor = Mock(name='child_descriptor')
child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
child_descriptor.student_view = child_descriptor._xmodule.student_view
child_descriptor.displayable_items.return_value = [child_descriptor]
child_descriptor.runtime = descriptor_system
child_descriptor.xmodule_runtime = get_test_system()
child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
child_descriptor.location = source_location.replace(category='html', name='child')
def load_item(usage_id, for_parent=None): # pylint: disable=unused-argument
"""Test-only implementation of load_item that simply returns static xblocks."""
return {
child_descriptor.location: child_descriptor,
source_location: source_descriptor
}.get(usage_id)
descriptor_system.load_item = load_item
system.descriptor_runtime = descriptor_system
# construct conditional module:
cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'attempted': 'true'},
'children': [child_descriptor.location],
})
cond_descriptor = ConditionalDescriptor(
descriptor_system,
field_data,
ScopeIds(None, None, cond_location, cond_location)
)
cond_descriptor.xmodule_runtime = system
system.get_module = lambda desc: desc
cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])
# return dict:
return {'cond_module': cond_descriptor,
'source_module': source_descriptor,
'child_module': child_descriptor}
class ConditionalModuleBasicTest(unittest.TestCase):
"""
Make sure that conditional module works, using mocks for
other modules.
"""
def setUp(self):
super(ConditionalModuleBasicTest, self).setUp()
self.test_system = get_test_system()
def test_icon_class(self):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because get_test_system returns the repr of the context dict passed to render_template,
# we reverse it here
html = modules['cond_module'].render(STUDENT_VIEW).content
expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
'ajax_url': modules['cond_module'].xmodule_runtime.ajax_url,
'element_id': u'i4x-edX-conditional_test-conditional-SampleConditional',
'depends': u'i4x-edX-conditional_test-problem-SampleProblem',
})
self.assertEquals(expected, html)
def test_handle_ajax(self):
modules = ConditionalFactory.create(self.test_system)
modules['source_module'].is_attempted = "false"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# now change state of the capa problem to make it completed
modules['source_module'].is_attempted = "true"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_error_as_source(self):
'''
Check that handle_ajax works properly if the source is really an ErrorModule,
and that the condition is not satisfied.
'''
modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
class ConditionalModuleXmlTest(unittest.TestCase):
"""
Make sure ConditionalModule works, by loading data in from an XML-defined course.
"""
@staticmethod
def get_system(load_error_modules=True):
'''Get a dummy system'''
return DummySystem(load_error_modules)
def setUp(self):
super(ConditionalModuleXmlTest, self).setUp()
self.test_system = get_test_system()
def get_course(self, name):
"""Get a test course by directory name. If there's more than one, error."""
print "Importing {0}".format(name)
modulestore = XMLModuleStore(DATA_DIR, source_dirs=[name])
courses = modulestore.get_courses()
self.modulestore = modulestore
self.assertEquals(len(courses), 1)
return courses[0]
def test_conditional_module(self):
"""Make sure that conditional module works"""
print "Starting import"
course = self.get_course('conditional_and_poll')
print "Course: ", course
print "id: ", course.id
def inner_get_module(descriptor):
if isinstance(descriptor, Location):
location = descriptor
descriptor = self.modulestore.get_item(location, depth=None)
descriptor.xmodule_runtime = get_test_system()
descriptor.xmodule_runtime.descriptor_runtime = descriptor._runtime # pylint: disable=protected-access
descriptor.xmodule_runtime.get_module = inner_get_module
return descriptor
# edx - HarvardX
# cond_test - ER22x
location = Location("HarvardX", "ER22x", "2013_Spring", "conditional", "condone")
def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):
return text
self.test_system.replace_urls = replace_urls
self.test_system.get_module = inner_get_module
module = inner_get_module(location)
print "module: ", module
print "module children: ", module.get_children()
print "module display items (children): ", module.get_display_items()
html = module.render(STUDENT_VIEW).content
print "html type: ", type(html)
print "html: ", html
html_expect = module.xmodule_runtime.render_template(
'conditional_ajax.html',
{
# Test ajax url is just usage-id / handler_name
'ajax_url': '{}/xmodule_handler'.format(location.to_deprecated_string()),
'element_id': u'i4x-HarvardX-ER22x-conditional-condone',
'depends': u'i4x-HarvardX-ER22x-problem-choiceprob'
}
)
self.assertEqual(html, html_expect)
gdi = module.get_display_items()
print "gdi=", gdi
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# Now change state of the capa problem to make it completed
inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
inner_module.attempts = 1
# Save our modifications to the underlying KeyValueStore so they can be persisted
inner_module.save()
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_conditional_module_with_empty_sources_list(self):
"""
If a ConditionalDescriptor is initialized with an empty sources_list, we assert that the sources_list is set
via generating UsageKeys from the values in xml_attributes['sources']
"""
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.sources_list[0],
conditional.location.course_key.make_usage_key_from_deprecated_string(conditional.xml_attributes['sources'])
)
def test_conditional_module_parse_sources(self):
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.parse_sources(conditional.xml_attributes),
['i4x://HarvardX/ER22x/poll_question/T15_poll', 'i4x://HarvardX/ER22x/poll_question/T16_poll']
)
| agpl-3.0 |
littlstar/chromium.src | third_party/tlslite/tlslite/utils/openssl_tripledes.py | 202 | 1788 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto 3DES implementation."""
from .cryptomath import *
from .tripledes import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(ciphertext)
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(plaintext) | bsd-3-clause |
tuxfux-hlp-notes/python-batches | batch-67/19-files/myenv/lib/python2.7/site-packages/pip/_vendor/pkg_resources/__init__.py | 320 | 103230 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
import itertools
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*' + part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
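# Illustrative sketch (not part of the original module): parse_version gives
# PEP 440-aware ordering, so numeric components compare numerically and dev/
# pre-releases sort before their final release; anything unparseable falls
# back to SetuptoolsLegacyVersion.
def _example_parse_version():
    assert parse_version('1.9.0') < parse_version('1.10.0')
    assert parse_version('2.0.dev1') < parse_version('2.0a1') < parse_version('2.0')
    return parse_version('not-a-release')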
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
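# Illustrative sketch (not part of the original module): platforms are
# compatible when either side is None or the strings match, with a special
# case letting eggs built for an older Mac OS X run on a newer release.
def _example_compatible_platforms():
    assert compatible_platforms(None, 'linux-x86_64')
    assert compatible_platforms('win32', 'win32')
    assert compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64')
    assert not compatible_platforms('macosx-10.12-x86_64', 'macosx-10.9-x86_64')
    return True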
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
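# Illustrative sketch (not part of the original module): get_distribution
# accepts a project name, a Requirement or a Distribution and returns the
# active Distribution. The 'setuptools' project name below is only an
# assumption about what happens to be installed in the environment.
def _example_get_distribution():
    dist = get_distribution('setuptools')
    return dist.project_name, dist.version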
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
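    # Hedged usage sketch (not part of the original source; the project name
    # and version below are placeholders): require() resolves and activates
    # matching distributions on the module-level working set.
    #
    #   import pkg_resources
    #   for dist in pkg_resources.working_set.require('SomeProject>=1.0'):
    #       print('activated', dist)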
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (the default), `callback` is also invoked for each
        distribution already present in the working set.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
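    # Minimal sketch of an activation listener (assumed example): the callback
    # receives each Distribution as it is added to the working set.
    #
    #   def _log_activation(dist):
    #       print('activated %s from %s' % (dist.project_name, dist.location))
    #
    #   working_set.subscribe(_log_activation)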
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version == self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
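    # Illustrative sketch (assumed project name): picking the newest compatible
    # distribution for a requirement from an Environment snapshot.
    #
    #   env = Environment()                        # scans sys.path by default
    #   req = Requirement.parse('SomeProject>=1.0')
    #   dist = env.best_match(req, WorkingSet([])) # None if nothing matches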
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
        In the base ``Environment`` class, this routine just returns
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
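    # Hypothetical usage sketch: _initialize() below re-exports these methods
    # as module-level functions, so typical calls look like the following
    # ('mypkg' and the resource path are placeholders, not real packages).
    #
    #   data = pkg_resources.resource_string('mypkg', 'data/config.json')
    #   path = pkg_resources.resource_filename('mypkg', 'data/config.json')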
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
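# Illustrative results of the helpers above (derived from their rules, not
# additional behavior):
#   safe_name('some.project?name')  -> 'some.project-name'
#   safe_version('1.2 beta 3')      -> '1.2.beta.3'  (via the fallback path)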
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
def invalid_marker(text):
"""
    Validate `text` as a PEP 508 environment marker; return an exception
    instance if it is invalid, or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
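# Minimal sketch (assuming the vendored 'packaging' dependency is available):
#
#   evaluate_marker('python_version >= "2.7"')  # -> True or False
#   invalid_marker('not a valid marker !!')     # -> SyntaxError instance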
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
def get_metadata(self, name):
if not self.egg_info:
return ""
value = self._get(self._fn(self.egg_info, name))
return value.decode('utf-8') if six.PY3 else value
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.6 and 3.2 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(os.listdir(path_item))
for entry in path_item_entries:
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
if len(os.listdir(fullpath)) == 0:
# Empty egg directory, skip.
continue
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
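# Hedged sketch of the namespace-package idiom this supports, as placed in a
# package's __init__.py ('mynamespace' here is only a placeholder name):
#
#   # mynamespace/__init__.py
#   __import__('pkg_resources').declare_namespace(__name__)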
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
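    # Illustrative sketch (assumed entry_points.txt content): parsing a group
    # of entry points from metadata text.
    #
    #   text = '[console_scripts]\nmytool = mypkg.cli:main'
    #   eps = EntryPoint.parse_map(text)
    #   eps['console_scripts']['mytool'].module_name   # 'mypkg.cli'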
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs = []
elif not evaluate_marker(marker):
reqs = []
extra = safe_extra(extra) or None
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
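    # Hypothetical sketch: a Distribution converts back into an exact
    # Requirement, which is how find_plugins() re-resolves candidates.
    #
    #   dist = Distribution(project_name='example', version='1.0')
    #   str(dist.as_requirement())   # 'example==1.0'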
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
        - If location is already on path anywhere (for non-eggs), or has
          higher priority than its parent (for eggs), do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an older
        safe_version, so their version numbers can get mangled when converted
        to filenames (e.g., 1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae).
        Such distributions will not be parsed properly downstream by
        Distribution and safe_version, so take the extra step of reading the
        version number from the metadata file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
line += next(lines)
yield Requirement(line)
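# Hypothetical usage sketch (placeholder project names): parse_requirements()
# accepts requirements-style text and yields Requirement objects.
#
#   reqs = list(parse_requirements("foo>=1.0\nbar[extra]==2.0  # pinned"))
#   [str(r) for r in reqs]   # e.g. ['foo>=1.0', 'bar[extra]==2.0']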
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
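    # Illustrative sketch (assumed name/versions): membership testing checks a
    # version string or Distribution against the requirement's specifier.
    #
    #   req = Requirement.parse('example>=1.0,<2.0')
    #   '1.5' in req   # True
    #   '2.1' in req   # False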
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object):
pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
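# Minimal sketch of split_sections() output for an illustrative input:
#
#   list(split_sections("top\n[sec]\na\nb"))
#   # -> [(None, ['top']), ('sec', ['a', 'b'])]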
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
dist = None # ensure dist is defined for del dist below
for dist in working_set:
dist.activate(replace=False)
del dist
add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
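# Illustrative sketch (added for clarity; not part of pkg_resources). Once the
# initializer above has run, the master working set's bound methods are exposed
# as module-level names; the distribution name below is only an example.
def _working_set_example():
    # require() resolves the requirement against sys.path, activates the
    # matching distributions and returns them.
    dists = require('setuptools')
    return [dist.project_name for dist in dists]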
| gpl-3.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/lib_openshift/src/class/oc_adm_csr.py | 18 | 7497 | # pylint: skip-file
# flake8: noqa
class OCcsr(OpenShiftCLI):
''' Class to wrap the oc adm certificate command line'''
kind = 'csr'
# pylint: disable=too-many-arguments
def __init__(self,
nodes=None,
approve_all=False,
service_account=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for oc adm certificate '''
super(OCcsr, self).__init__(None, kubeconfig, verbose)
self.service_account = service_account
self.nodes = self.create_nodes(nodes)
self._csrs = []
self.approve_all = approve_all
self.verbose = verbose
@property
def csrs(self):
'''property for managing csrs'''
# any processing needed??
self._csrs = self._get(resource=self.kind)['results'][0]['items']
return self._csrs
def create_nodes(self, nodes):
'''create a node object to track csr signing status'''
nodes_list = []
if nodes is None:
return nodes_list
results = self._get(resource='nodes')['results'][0]['items']
for node in nodes:
nodes_list.append(dict(name=node, csrs={}, accepted=False, denied=False))
for ocnode in results:
if node in ocnode['metadata']['name']:
nodes_list[-1]['accepted'] = True
return nodes_list
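    # Added note (not part of the original module): each entry produced above
    # is a plain dict used to track signing progress, e.g. (node name echoes
    # the example in get_csr_request() below):
    #   {'name': 'ip-172-31-54-54.ec2.internal', 'csrs': {},
    #    'accepted': False, 'denied': False}
    # 'accepted' is pre-set to True when the node is already registered in the
    # cluster.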
def get(self):
'''get the current certificate signing requests'''
return self.csrs
@staticmethod
def action_needed(csr, action):
'''check to see if csr is in desired state'''
if csr['status'] == {}:
return True
state = csr['status']['conditions'][0]['type']
if action == 'approve' and state != 'Approved':
return True
elif action == 'deny' and state != 'Denied':
return True
return False
def get_csr_request(self, request):
'''base64 decode the request object and call openssl to determine the
subject and specifically the CN: from the request
Output:
(0, '...
Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
...')
'''
import base64
return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
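    # Added illustrative helper (not part of the original module): a minimal
    # sketch of pulling the CN back out of the openssl output produced by
    # get_csr_request() above; match_node() below relies on a plain substring
    # check instead.
    @staticmethod
    def _example_extract_cn(request_text):
        import re
        match = re.search(r'CN=([^,\s]+)', request_text)
        return match.group(1) if match else None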
def match_node(self, csr):
'''match an incoming csr to a node in self.nodes'''
for node in self.nodes:
# we need to match based upon the csr's request certificate's CN
if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
if node['name'] in csr['spec']['username'] and csr['status']:
if csr['status']['conditions'][0]['type'] == 'Approved':
node['accepted'] = True
# check type is 'Denied' and mark node as such
if csr['status'] and csr['status']['conditions'][0]['type'] == 'Denied':
node['denied'] = True
return node
return None
def finished(self):
'''determine if there are more csrs to sign'''
# if nodes is set and we have nodes then return whether all nodes are 'accepted' or 'denied'
if self.nodes is not None and len(self.nodes) > 0:
return all([node['accepted'] or node['denied'] for node in self.nodes])
# we are approving everything or we still have nodes outstanding
return False
def manage(self, action):
'''run the oc adm certificate <approve|deny> command for matching csrs and store results into self.nodes
we attempt to verify that the node is one that was given to us to accept.
action - (approve | deny)
'''
results = []
# There are 2 types of requests:
# - node-bootstrapper-client-ip-172-31-51-246-ec2-internal
# The client request allows the client to talk to the api/controller
# - node-bootstrapper-server-ip-172-31-51-246-ec2-internal
# The server request allows the server to join the cluster
# Here we need to determine how to approve/deny
# we should query the csrs and verify they are from the nodes we thought
for csr in self.csrs:
node = self.match_node(csr)
# oc adm certificate <approve|deny> csr
# there are 3 known states: Denied, Approved, {}
# verify something is needed by OCcsr.action_needed
# if approve_all, then do it
# if you passed in nodes, you must have a node that matches
if self.approve_all or (node and OCcsr.action_needed(csr, action)):
result = self.openshift_cmd(['certificate', action, csr['metadata']['name']], oadm=True)
# client should have service account name in username field
# server should have node name in username field
if node and csr['metadata']['name'] not in node['csrs']:
node['csrs'][csr['metadata']['name']] = csr
# accept node in cluster
if node['name'] in csr['spec']['username']:
node['accepted'] = True
results.append(result)
return results
@staticmethod
def run_ansible(params, check_mode=False):
'''run the idempotent ansible code'''
client = OCcsr(params['nodes'],
params['approve_all'],
params['service_account'],
params['kubeconfig'],
params['debug'])
state = params['state']
api_rval = client.get()
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
if state in ['approve', 'deny']:
if check_mode:
return {'changed': True,
'msg': "CHECK_MODE: Would have {} the certificate.".format(params['state']),
'state': state}
all_results = []
finished = False
timeout = False
import time
# loop for timeout or block until all nodes pass
ctr = 0
while True:
all_results.extend(client.manage(params['state']))
if client.finished():
finished = True
break
if params['timeout'] == 0:
if not params['approve_all']:
ctr = 0
if ctr * 2 > params['timeout']:
timeout = True
break
# This provides time for the nodes to send their csr requests between approvals
time.sleep(2)
ctr += 1
for result in all_results:
if result['returncode'] != 0:
return {'failed': True, 'msg': all_results}
return dict(changed=len(all_results) > 0,
results=all_results,
nodes=client.nodes,
state=state,
finished=finished,
timeout=timeout)
return {'failed': True,
'msg': 'Unknown state passed. %s' % state}
| apache-2.0 |
mtrgroup/django-mtr-sync | mtr/sync/lib/processors/xlsx.py | 1 | 2227 | import os
os.environ['OPENPYXL_LXML'] = 'False'
import openpyxl
from ..processor import Processor
from ..manager import manager
from ...translation import gettext_lazy as _
@manager.register('processor')
class XlsxProcessor(Processor):
file_format = '.xlsx'
file_description = _('Microsoft Excel 2007/2010/2013 XML')
def create(self, path):
self._path = path
self._workbook = openpyxl.Workbook(optimized_write=True)
self._worksheet = self._workbook.create_sheet()
self._worksheet.title = self.settings.worksheet
if self.start['row'] > 1:
for i in range(0, self.start['row']):
self._worksheet.append([])
def open(self, path):
self._workbook = openpyxl.load_workbook(path, use_iterators=True)
if not self.settings.worksheet:
self.settings.worksheet = self._workbook.get_sheet_names()[0]
self._worksheet = self._workbook.get_sheet_by_name(
self.settings.worksheet)
self._read_from_start = False
self._worksheet.calculate_dimension()
self._rows = self._worksheet.iter_rows()
self._rows_counter = 0
self._max_cells = self._worksheet.get_highest_column()
return (
self._worksheet.get_highest_row(),
self._max_cells)
def write(self, row, value):
self._worksheet.append(value)
def _get_row(self, row):
value = None
row += 1
try:
if self._read_from_start:
self._rows = self._worksheet.iter_rows()
self._rows_counter = 0
while self._rows_counter < row:
self._rows_counter += 1
value = next(self._rows)
except StopIteration:
return [''] * (self._max_cells + 1)
return [v.value for v in value]
def read(self, row, cells=None):
readed = []
value = self._get_row(row)
cells = cells or self.cells
for index in cells:
try:
readed.append(value[index])
except IndexError:
readed.append('')
return readed
def save(self):
self._workbook.save(self._path)
| mit |
necaris/python3-openid | openid/store/filestore.py | 1 | 12673 | """
This module contains an C{L{OpenIDStore}} implementation backed by
flat files.
"""
import string
import os
import os.path
import time
import logging
from errno import EEXIST, ENOENT
from tempfile import mkstemp
from openid.association import Association
from openid.store.interface import OpenIDStore
from openid.store import nonce
from openid import cryptutil, oidutil
logger = logging.getLogger(__name__)
_filename_allowed = string.ascii_letters + string.digits + '.'
_isFilenameSafe = set(_filename_allowed).__contains__
def _safe64(s):
h64 = oidutil.toBase64(cryptutil.sha1(s))
# to be able to manipulate it, make it a bytearray
h64 = bytearray(h64)
h64 = h64.replace(b'+', b'_')
h64 = h64.replace(b'/', b'.')
h64 = h64.replace(b'=', b'')
return bytes(h64)
def _filenameEscape(s):
filename_chunks = []
for c in s:
if _isFilenameSafe(c):
filename_chunks.append(c)
else:
filename_chunks.append('_%02X' % ord(c))
return ''.join(filename_chunks)
def _removeIfPresent(filename):
"""Attempt to remove a file, returning whether the file existed at
the time of the call.
str -> bool
"""
try:
os.unlink(filename)
except OSError as why:
if why.errno == ENOENT:
# Someone beat us to it, but it's gone, so that's OK
return 0
else:
raise
else:
# File was present
return 1
def _ensureDir(dir_name):
"""Create dir_name as a directory if it does not exist. If it
exists, make sure that it is, in fact, a directory.
Can raise OSError
str -> NoneType
"""
try:
os.makedirs(dir_name)
except OSError as why:
if why.errno != EEXIST or not os.path.isdir(dir_name):
raise
class FileOpenIDStore(OpenIDStore):
"""
This is a filesystem-based store for OpenID associations and
nonces. This store should be safe for use in concurrent systems
on both Windows and Unix (excluding NFS filesystems). There are a
couple of race conditions in the system, but those failure cases have
been set up in such a way that the worst-case behavior is someone
having to try to log in a second time.
Most of the methods of this class are implementation details.
People wishing to just use this store need only pay attention to
the C{L{__init__}} method.
Methods of this object can raise OSError if unexpected filesystem
conditions, such as bad permissions or missing directories, occur.
"""
def __init__(self, directory):
"""
Initializes a new FileOpenIDStore. This initializes the
nonce and association directories, which are subdirectories of
the directory passed in.
@param directory: This is the directory to put the store
directories in.
@type directory: C{str}
"""
# Make absolute
directory = os.path.normpath(os.path.abspath(directory))
self.nonce_dir = os.path.join(directory, 'nonces')
self.association_dir = os.path.join(directory, 'associations')
# Temp dir must be on the same filesystem as the associations
# directory
self.temp_dir = os.path.join(directory, 'temp')
self.max_nonce_age = 6 * 60 * 60 # Six hours, in seconds
self._setup()
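    # Illustrative usage (added; not part of the original module). The store
    # only needs a writable base directory; the 'nonces', 'associations' and
    # 'temp' subdirectories are created by _setup() below. The path is an
    # example:
    #
    #     store = FileOpenIDStore('/var/lib/myapp/openid-store')
    #     store.cleanup()   # prune expired associations and nonces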
def _setup(self):
"""Make sure that the directories in which we store our data
exist.
() -> NoneType
"""
_ensureDir(self.nonce_dir)
_ensureDir(self.association_dir)
_ensureDir(self.temp_dir)
def _mktemp(self):
"""Create a temporary file on the same filesystem as
self.association_dir.
The temporary directory should not be cleaned if there are any
processes using the store. If there is no active process using
the store, it is safe to remove all of the files in the
temporary directory.
() -> (file, str)
"""
fd, name = mkstemp(dir=self.temp_dir)
try:
file_obj = os.fdopen(fd, 'wb')
return file_obj, name
except:
_removeIfPresent(name)
raise
def getAssociationFilename(self, server_url, handle):
"""Create a unique filename for a given server url and
handle. This implementation does not assume anything about the
format of the handle. The filename that is returned will
contain the domain name from the server URL for ease of human
inspection of the data directory.
(str, str) -> str
"""
if server_url.find('://') == -1:
raise ValueError('Bad server URL: %r' % server_url)
proto, rest = server_url.split('://', 1)
domain = _filenameEscape(rest.split('/', 1)[0])
url_hash = _safe64(server_url)
if handle:
handle_hash = _safe64(handle)
else:
handle_hash = ''
filename = '%s-%s-%s-%s' % (proto, domain, url_hash, handle_hash)
return os.path.join(self.association_dir, filename)
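    # Added note (not part of the original module): for example, a server_url
    # of 'https://example.com/server' yields a filename of the form
    # 'https-example.com-<url_hash>-<handle_hash>' inside association_dir,
    # where both hashes come from _safe64() above.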
def storeAssociation(self, server_url, association):
"""Store an association in the association directory.
(str, Association) -> NoneType
"""
association_s = association.serialize() # NOTE: UTF-8 encoded bytes
filename = self.getAssociationFilename(server_url, association.handle)
tmp_file, tmp = self._mktemp()
try:
try:
tmp_file.write(association_s)
os.fsync(tmp_file.fileno())
finally:
tmp_file.close()
try:
os.rename(tmp, filename)
except OSError as why:
if why.errno != EEXIST:
raise
# We only expect EEXIST to happen on Windows. It's
# possible that we will succeed in unlinking the existing
# file, but not in putting the temporary file in place.
try:
os.unlink(filename)
except OSError as why:
if why.errno == ENOENT:
pass
else:
raise
# Now the target should not exist. Try renaming again,
# giving up if it fails.
os.rename(tmp, filename)
except:
# If there was an error, don't leave the temporary file
# around.
_removeIfPresent(tmp)
raise
def getAssociation(self, server_url, handle=None):
"""Retrieve an association. If no handle is specified, return
the most recently issued association.
(str, str or NoneType) -> Association or NoneType
"""
if handle is None:
handle = ''
# The filename with the empty handle is a prefix of all other
# associations for the given server URL.
filename = self.getAssociationFilename(server_url, handle)
if handle:
return self._getAssociation(filename)
else:
association_files = os.listdir(self.association_dir)
matching_files = []
# strip off the path to do the comparison
name = os.path.basename(filename)
for association_file in association_files:
if association_file.startswith(name):
matching_files.append(association_file)
matching_associations = []
# read the matching files and sort by time issued
for name in matching_files:
full_name = os.path.join(self.association_dir, name)
association = self._getAssociation(full_name)
if association is not None:
matching_associations.append(
(association.issued, association))
matching_associations.sort()
# return the most recently issued one.
if matching_associations:
(_, assoc) = matching_associations[-1]
return assoc
else:
return None
def _getAssociation(self, filename):
try:
assoc_file = open(filename, 'rb')
except IOError as why:
if why.errno == ENOENT:
# No association exists for that URL and handle
return None
else:
raise
try:
assoc_s = assoc_file.read()
finally:
assoc_file.close()
try:
association = Association.deserialize(assoc_s)
except ValueError:
_removeIfPresent(filename)
return None
# Clean up expired associations
if association.expiresIn == 0:
_removeIfPresent(filename)
return None
else:
return association
def removeAssociation(self, server_url, handle):
"""Remove an association if it exists. Do nothing if it does not.
(str, str) -> bool
"""
assoc = self.getAssociation(server_url, handle)
if assoc is None:
return 0
else:
filename = self.getAssociationFilename(server_url, handle)
return _removeIfPresent(filename)
def useNonce(self, server_url, timestamp, salt):
"""Return whether this nonce is valid.
str -> bool
"""
if abs(timestamp - time.time()) > nonce.SKEW:
return False
if server_url:
proto, rest = server_url.split('://', 1)
else:
# Create empty proto / rest values for empty server_url,
# which is part of a consumer-generated nonce.
proto, rest = '', ''
domain = _filenameEscape(rest.split('/', 1)[0])
url_hash = _safe64(server_url)
salt_hash = _safe64(salt)
filename = '%08x-%s-%s-%s-%s' % (timestamp, proto, domain, url_hash,
salt_hash)
filename = os.path.join(self.nonce_dir, filename)
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o200)
except OSError as why:
if why.errno == EEXIST:
return False
else:
raise
else:
os.close(fd)
return True
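    # Added note (not part of the original module): the O_CREAT | O_EXCL open
    # above is what makes the check atomic - a second call presenting the same
    # (timestamp, server_url, salt) triple hits EEXIST and is rejected, e.g.
    # (values are examples only):
    #
    #     store.useNonce('https://example.com/server', ts, 'salt')  # True
    #     store.useNonce('https://example.com/server', ts, 'salt')  # False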
def _allAssocs(self):
all_associations = []
association_filenames = [
os.path.join(self.association_dir, filename)
for filename in os.listdir(self.association_dir)
]
for association_filename in association_filenames:
try:
association_file = open(association_filename, 'rb')
except IOError as why:
if why.errno == ENOENT:
logger.exception("%s disappeared during %s._allAssocs" % (
association_filename, self.__class__.__name__))
else:
raise
else:
try:
assoc_s = association_file.read()
finally:
association_file.close()
# Remove expired or corrupted associations
try:
association = Association.deserialize(assoc_s)
except ValueError:
_removeIfPresent(association_filename)
else:
all_associations.append(
(association_filename, association))
return all_associations
def cleanup(self):
"""Remove expired entries from the database. This is
potentially expensive, so only run when it is acceptable to
take time.
() -> NoneType
"""
self.cleanupAssociations()
self.cleanupNonces()
def cleanupAssociations(self):
removed = 0
for assoc_filename, assoc in self._allAssocs():
if assoc.expiresIn == 0:
_removeIfPresent(assoc_filename)
removed += 1
return removed
def cleanupNonces(self):
nonces = os.listdir(self.nonce_dir)
now = time.time()
removed = 0
# Check all nonces for expiry
for nonce_fname in nonces:
timestamp = nonce_fname.split('-', 1)[0]
timestamp = int(timestamp, 16)
if abs(timestamp - now) > nonce.SKEW:
filename = os.path.join(self.nonce_dir, nonce_fname)
_removeIfPresent(filename)
removed += 1
return removed
| apache-2.0 |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/ZBng/PyScripts/Lib/zbng/mca/network/cmd/shares/type_Result.py | 1 | 9203 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
RESULT_LIST_TYPE_UNKNOWN = 0
RESULT_LIST_TYPE_WILDCARD = 1
RESULT_LIST_TYPE_DISK_DEVICE = 2
RESULT_LIST_TYPE_SPOOL_DEVICE = 3
RESULT_LIST_TYPE_IPC = 4
RESULT_LIST_STATUS_UNKNOWN = 0
RESULT_LIST_STATUS_OK = 1
RESULT_LIST_STATUS_PAUSED = 2
RESULT_LIST_STATUS_DISCONNECTED = 3
RESULT_LIST_STATUS_NETWORK_ERROR = 4
RESULT_LIST_STATUS_CONNECTING = 5
RESULT_LIST_STATUS_RECONNECTING = 6
RESULT_QUERY_TYPE_UNKNOWN = 0
RESULT_QUERY_TYPE_ANY = 1
RESULT_QUERY_TYPE_DISK = 2
RESULT_QUERY_TYPE_PRINT = 3
RESULT_QUERY_TYPE_DEVICE = 4
RESULT_QUERY_TYPE_IPC = 5
class ResultMap:
def __init__(self):
self.__dict__['resourcePath'] = ''
self.__dict__['resourceName'] = ''
def __getattr__(self, name):
if name == 'resourcePath':
return self.__dict__['resourcePath']
if name == 'resourceName':
return self.__dict__['resourceName']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'resourcePath':
self.__dict__['resourcePath'] = value
elif name == 'resourceName':
self.__dict__['resourceName'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddStringUtf8(MSG_KEY_RESULT_MAP_RESOURCE_PATH, self.__dict__['resourcePath'])
submsg.AddStringUtf8(MSG_KEY_RESULT_MAP_RESOURCE_NAME, self.__dict__['resourceName'])
mmsg.AddMessage(MSG_KEY_RESULT_MAP, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_MAP, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['resourcePath'] = submsg.FindString(MSG_KEY_RESULT_MAP_RESOURCE_PATH)
self.__dict__['resourceName'] = submsg.FindString(MSG_KEY_RESULT_MAP_RESOURCE_NAME)
class ResultList:
def __init__(self):
self.__dict__['local'] = ''
self.__dict__['remote'] = ''
self.__dict__['password'] = ''
self.__dict__['status'] = 0
self.__dict__['type'] = 0
self.__dict__['referenceCount'] = 0
self.__dict__['useCount'] = 0
self.__dict__['username'] = ''
self.__dict__['domainName'] = ''
def __getattr__(self, name):
if name == 'local':
return self.__dict__['local']
if name == 'remote':
return self.__dict__['remote']
if name == 'password':
return self.__dict__['password']
if name == 'status':
return self.__dict__['status']
if name == 'type':
return self.__dict__['type']
if name == 'referenceCount':
return self.__dict__['referenceCount']
if name == 'useCount':
return self.__dict__['useCount']
if name == 'username':
return self.__dict__['username']
if name == 'domainName':
return self.__dict__['domainName']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'local':
self.__dict__['local'] = value
elif name == 'remote':
self.__dict__['remote'] = value
elif name == 'password':
self.__dict__['password'] = value
elif name == 'status':
self.__dict__['status'] = value
elif name == 'type':
self.__dict__['type'] = value
elif name == 'referenceCount':
self.__dict__['referenceCount'] = value
elif name == 'useCount':
self.__dict__['useCount'] = value
elif name == 'username':
self.__dict__['username'] = value
elif name == 'domainName':
self.__dict__['domainName'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddStringUtf8(MSG_KEY_RESULT_LIST_LOCAL, self.__dict__['local'])
submsg.AddStringUtf8(MSG_KEY_RESULT_LIST_REMOTE, self.__dict__['remote'])
submsg.AddStringUtf8(MSG_KEY_RESULT_LIST_PASSWORD, self.__dict__['password'])
submsg.AddU8(MSG_KEY_RESULT_LIST_STATUS, self.__dict__['status'])
submsg.AddU8(MSG_KEY_RESULT_LIST_TYPE, self.__dict__['type'])
submsg.AddU32(MSG_KEY_RESULT_LIST_REFERENCE_COUNT, self.__dict__['referenceCount'])
submsg.AddU32(MSG_KEY_RESULT_LIST_USE_COUNT, self.__dict__['useCount'])
submsg.AddStringUtf8(MSG_KEY_RESULT_LIST_USERNAME, self.__dict__['username'])
submsg.AddStringUtf8(MSG_KEY_RESULT_LIST_DOMAIN, self.__dict__['domainName'])
mmsg.AddMessage(MSG_KEY_RESULT_LIST, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_LIST, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['local'] = submsg.FindString(MSG_KEY_RESULT_LIST_LOCAL)
self.__dict__['remote'] = submsg.FindString(MSG_KEY_RESULT_LIST_REMOTE)
self.__dict__['password'] = submsg.FindString(MSG_KEY_RESULT_LIST_PASSWORD)
self.__dict__['status'] = submsg.FindU8(MSG_KEY_RESULT_LIST_STATUS)
self.__dict__['type'] = submsg.FindU8(MSG_KEY_RESULT_LIST_TYPE)
self.__dict__['referenceCount'] = submsg.FindU32(MSG_KEY_RESULT_LIST_REFERENCE_COUNT)
self.__dict__['useCount'] = submsg.FindU32(MSG_KEY_RESULT_LIST_USE_COUNT)
self.__dict__['username'] = submsg.FindString(MSG_KEY_RESULT_LIST_USERNAME)
self.__dict__['domainName'] = submsg.FindString(MSG_KEY_RESULT_LIST_DOMAIN)
class ResultQuery:
def __init__(self):
self.__dict__['name'] = ''
self.__dict__['path'] = ''
self.__dict__['hasPath'] = False
self.__dict__['type'] = RESULT_QUERY_TYPE_UNKNOWN
self.__dict__['admin'] = False
self.__dict__['caption'] = ''
self.__dict__['description'] = ''
def __getattr__(self, name):
if name == 'name':
return self.__dict__['name']
if name == 'path':
return self.__dict__['path']
if name == 'hasPath':
return self.__dict__['hasPath']
if name == 'type':
return self.__dict__['type']
if name == 'admin':
return self.__dict__['admin']
if name == 'caption':
return self.__dict__['caption']
if name == 'description':
return self.__dict__['description']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'name':
self.__dict__['name'] = value
elif name == 'path':
self.__dict__['path'] = value
elif name == 'hasPath':
self.__dict__['hasPath'] = value
elif name == 'type':
self.__dict__['type'] = value
elif name == 'admin':
self.__dict__['admin'] = value
elif name == 'caption':
self.__dict__['caption'] = value
elif name == 'description':
self.__dict__['description'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddStringUtf8(MSG_KEY_RESULT_QUERY_NAME, self.__dict__['name'])
submsg.AddStringUtf8(MSG_KEY_RESULT_QUERY_PATH, self.__dict__['path'])
submsg.AddBool(MSG_KEY_RESULT_QUERY_HAS_PATH, self.__dict__['hasPath'])
submsg.AddU8(MSG_KEY_RESULT_QUERY_TYPE, self.__dict__['type'])
submsg.AddBool(MSG_KEY_RESULT_QUERY_ADMIN, self.__dict__['admin'])
submsg.AddStringUtf8(MSG_KEY_RESULT_QUERY_CAPTION, self.__dict__['caption'])
submsg.AddStringUtf8(MSG_KEY_RESULT_QUERY_DESCRIPTION, self.__dict__['description'])
mmsg.AddMessage(MSG_KEY_RESULT_QUERY, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_QUERY, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['name'] = submsg.FindString(MSG_KEY_RESULT_QUERY_NAME)
self.__dict__['path'] = submsg.FindString(MSG_KEY_RESULT_QUERY_PATH)
self.__dict__['hasPath'] = submsg.FindBool(MSG_KEY_RESULT_QUERY_HAS_PATH)
self.__dict__['type'] = submsg.FindU8(MSG_KEY_RESULT_QUERY_TYPE)
self.__dict__['admin'] = submsg.FindBool(MSG_KEY_RESULT_QUERY_ADMIN)
self.__dict__['caption'] = submsg.FindString(MSG_KEY_RESULT_QUERY_CAPTION)
self.__dict__['description'] = submsg.FindString(MSG_KEY_RESULT_QUERY_DESCRIPTION) | unlicense |
sidartaoliveira/ansible | lib/ansible/modules/network/f5/bigip_gtm_wide_ip.py | 49 | 5381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Michael Perzel
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_wide_ip
short_description: "Manages F5 BIG-IP GTM wide ip"
description:
- "Manages F5 BIG-IP GTM wide ip"
version_added: "2.0"
author:
- Michael Perzel (@perzizzle)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11.4"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Tested with manager and above account privilege level"
requirements:
- bigsuds
options:
lb_method:
description:
- LB method of wide ip
required: true
choices: ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
wide_ip:
description:
- Wide IP name
required: true
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Set lb method
local_action: >
bigip_gtm_wide_ip
server=192.0.2.1
user=admin
password=mysecret
lb_method=round_robin
wide_ip=my-wide-ip.example.com
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.f5_utils import bigip_api, f5_argument_spec
def get_wide_ip_lb_method(api, wide_ip):
lb_method = api.GlobalLB.WideIP.get_lb_method(wide_ips=[wide_ip])[0]
lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
return lb_method
def get_wide_ip_pools(api, wide_ip):
try:
return api.GlobalLB.WideIP.get_wideip_pool([wide_ip])
except Exception:
e = get_exception()
print(e)
def wide_ip_exists(api, wide_ip):
# hack to determine if wide_ip exists
result = False
try:
api.GlobalLB.WideIP.get_object_status(wide_ips=[wide_ip])
result = True
except bigsuds.OperationFailed:
e = get_exception()
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def set_wide_ip_lb_method(api, wide_ip, lb_method):
lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
api.GlobalLB.WideIP.set_lb_method(wide_ips=[wide_ip], lb_methods=[lb_method])
def main():
argument_spec = f5_argument_spec()
lb_method_choices = ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
meta_args = dict(
lb_method = dict(type='str', required=True, choices=lb_method_choices),
wide_ip = dict(type='str', required=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
wide_ip = module.params['wide_ip']
lb_method = module.params['lb_method']
validate_certs = module.params['validate_certs']
result = {'changed': False} # default
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
if not wide_ip_exists(api, wide_ip):
module.fail_json(msg="wide ip %s does not exist" % wide_ip)
if lb_method is not None and lb_method != get_wide_ip_lb_method(api, wide_ip):
if not module.check_mode:
set_wide_ip_lb_method(api, wide_ip, lb_method)
result = {'changed': True}
else:
result = {'changed': True}
except Exception:
e = get_exception()
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
towc/secret-ox | node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py | 1363 | 58344 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
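# Illustrative (added; not part of gyp itself): a .gyp target can reference
# these variables, e.g. in an action, and this generator maps them onto the
# Xcode settings described above. File and action names below are made up.
#
#   'actions': [{
#     'action_name': 'generate_header',
#     'inputs': ['gen.py'],
#     'outputs': ['<(INTERMEDIATE_DIR)/generated.h'],
#     'action': ['python', 'gen.py', '<(INTERMEDIATE_DIR)/generated.h'],
#   }],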
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
generator_filelist_paths = None
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
# does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some tests end up using sockets, files on disk, etc. and can get
# confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls whether all test runs
# are forced to be serial. It defaults to True. To get serial runs,
# this little bit of python does the same as the linux flock utility
# to make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if variable not in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
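# Illustrative sketch (added for clarity; not part of gyp). Demonstrates the
# behaviour documented above; the variable names are made up.
def _ExpandXcodeVariablesExample():
  expansions = {'INPUT_FILE_BASE': 'foo', 'CONFIGURATION': 'Debug'}
  # Known keys are substituted, unknown ones are left in place.
  return ExpandXcodeVariables('$(INPUT_FILE_BASE).$(UNKNOWN).$(CONFIGURATION)',
                              expansions)
  # -> 'foo.$(UNKNOWN).Debug'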
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly interpret variables,
especially $(inherited)."""
return re.sub(_xcode_define_re, r'\\\1', s)
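# Illustrative sketch (added for clarity; not part of gyp). Spaces, quotes and
# backslashes in a define survive as literals once escaped.
def _EscapeXcodeDefineExample():
  return EscapeXcodeDefine('NAME="hello world"')
  # -> NAME=\"hello\ world\" (each quote and space gains a backslash)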
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def CalculateGeneratorInputInfo(params):
toplevel = params['options'].toplevel_dir
if params.get('flavor') == 'ninja':
generator_dir = os.path.relpath(params['options'].generator_output or '.')
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
output_dir = os.path.normpath(os.path.join(generator_dir, output_dir))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles-xcode-ninja'))
else:
output_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild'))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
upgrade_check_project_version = \
generator_flags.get('xcode_upgrade_check_project_version', None)
# Format upgrade_check_project_version with leading zeros as needed.
if upgrade_check_project_version:
upgrade_check_project_version = str(upgrade_check_project_version)
while len(upgrade_check_project_version) < 4:
upgrade_check_project_version = '0' + upgrade_check_project_version
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
# Set project-level attributes from multiple options
project_attributes = {}
if parallel_builds:
project_attributes['BuildIndependentTargetsInParallel'] = 'YES'
if upgrade_check_project_version:
project_attributes['LastUpgradeCheck'] = upgrade_check_project_version
project_attributes['LastTestingUpgradeCheck'] = \
upgrade_check_project_version
project_attributes['LastSwiftUpdateCheck'] = \
upgrade_check_project_version
pbxp.SetProperty('attributes', project_attributes)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'mac_kernel_extension': 'com.apple.product-type.kernel-extension',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle':
'com.apple.product-type.application.watchapp',
'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
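# For example, an 'executable' target with mac_bundle set ends up with the
# key 'executable+bundle' (com.apple.product-type.application), while a
# plain 'executable' maps to com.apple.product-type.tool.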
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names)
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
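# As an illustration (names are hypothetical): an action with message
# 'Generating foo' and action ['python', 'gen.py'] produces a script roughly
# like:
#   echo note: Generating\ foo
#   exec python gen.py
#   exit 1
# The trailing 'exit 1' is only reached if exec fails to start the command.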
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed; I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
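# For example, a rule_source of 'dir/name.ext' yields:
#   INPUT_FILE_DIRNAME='dir', INPUT_FILE_NAME='name.ext',
#   INPUT_FILE_BASE='name', INPUT_FILE_SUFFIX='.ext',
#   INPUT_FILE_PATH='dir/name.ext'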
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_', '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# If not for that, "all" could simply contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
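# For a single rule source 'one.ext' producing 'one.cc', the generated
# makefile looks roughly like this (names illustrative only; recipe lines
# are tab-indented):
#   all: \
#     one.cc
#   one.cc \
#    : \
#     one.ext
#   	@mkdir -p "<output dir>"
#   	@echo note: <message>
#   	<action command>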
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
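# For example, a 'destination' of 'data/files' becomes '$(SRCROOT)/data/files',
# while destinations already starting with '/' or '$' are used verbatim.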
code_sign = int(copy_group.get('xcode_code_sign', 0))
settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| mit |
vrenaville/ngo-addons-backport | addons/point_of_sale/controllers/main.py | 56 | 5627 | # -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
from openerp.addons.web.controllers.main import manifest_list, module_boot, html_template
class PointOfSaleController(openerp.addons.web.http.Controller):
_cp_path = '/pos'
@openerp.addons.web.http.httprequest
def app(self, req, s_action=None, **kw):
js = "\n ".join('<script type="text/javascript" src="%s"></script>' % i for i in manifest_list(req, None, 'js'))
css = "\n ".join('<link rel="stylesheet" href="%s">' % i for i in manifest_list(req, None, 'css'))
cookie = req.httprequest.cookies.get("instance0|session_id")
session_id = cookie.replace("%22","")
template = html_template.replace('<html','<html manifest="/pos/manifest?session_id=%s"'%session_id)
r = template % {
'js': js,
'css': css,
'modules': simplejson.dumps(module_boot(req)),
'init': 'var wc = new s.web.WebClient();wc.appendTo($(document.body));'
}
return r
@openerp.addons.web.http.httprequest
def manifest(self, req, **kwargs):
""" This generates a HTML5 cache manifest files that preloads the categories and products thumbnails
and other ressources necessary for the point of sale to work offline """
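# The returned manifest is plain text along these lines (URLs illustrative):
#   CACHE MANIFEST
#   /point_of_sale/static/src/img/...
#   /web/binary/image?session_id=...&model=product.product&field=image&id=...
#   NETWORK:
#   *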
ml = ["CACHE MANIFEST"]
# loading all the images in the static/src/img/* directories
def load_css_img(srcdir,dstdir):
for f in os.listdir(srcdir):
path = os.path.join(srcdir,f)
dstpath = os.path.join(dstdir,f)
if os.path.isdir(path):
load_css_img(path,dstpath)
elif f.endswith(('.png','.PNG','.jpg','.JPG','.jpeg','.JPEG','.gif','.GIF')):
ml.append(dstpath)
imgdir = openerp.modules.get_module_resource('point_of_sale','static/src/img')
load_css_img(imgdir,'/point_of_sale/static/src/img')
products = req.session.model('product.product')
for p in products.search_read([('pos_categ_id','!=',False)], ['name']):
product_id = p['id']
url = "/web/binary/image?session_id=%s&model=product.product&field=image&id=%s" % (req.session_id, product_id)
ml.append(url)
categories = req.session.model('pos.category')
for c in categories.search_read([],['name']):
category_id = c['id']
url = "/web/binary/image?session_id=%s&model=pos.category&field=image&id=%s" % (req.session_id, category_id)
ml.append(url)
ml += ["NETWORK:","*"]
m = "\n".join(ml)
return m
@openerp.addons.web.http.jsonrequest
def dispatch(self, request, iface, **kwargs):
method = 'iface_%s' % iface
return getattr(self, method)(request, **kwargs)
@openerp.addons.web.http.jsonrequest
def scan_item_success(self, request, ean):
"""
A product has been scanned with success
"""
print 'scan_item_success: ' + str(ean)
return
@openerp.addons.web.http.jsonrequest
def scan_item_error_unrecognized(self, request, ean):
"""
A product has been scanned without success
"""
print 'scan_item_error_unrecognized: ' + str(ean)
return
@openerp.addons.web.http.jsonrequest
def help_needed(self, request):
"""
The user wants an help (ex: light is on)
"""
print "help_needed"
return
@openerp.addons.web.http.jsonrequest
def help_canceled(self, request):
"""
The user stops the help request
"""
print "help_canceled"
return
@openerp.addons.web.http.jsonrequest
def weighting_start(self, request):
print "weighting_start"
return
@openerp.addons.web.http.jsonrequest
def weighting_read_kg(self, request):
print "weighting_read_kg"
return 0.0
@openerp.addons.web.http.jsonrequest
def weighting_end(self, request):
print "weighting_end"
return
@openerp.addons.web.http.jsonrequest
def payment_request(self, request, price):
"""
The PoS will activate the method payment
"""
print "payment_request: price:"+str(price)
return 'ok'
@openerp.addons.web.http.jsonrequest
def payment_status(self, request):
print "payment_status"
return { 'status':'waiting' }
@openerp.addons.web.http.jsonrequest
def payment_cancel(self, request):
print "payment_cancel"
return
@openerp.addons.web.http.jsonrequest
def transaction_start(self, request):
print 'transaction_start'
return
@openerp.addons.web.http.jsonrequest
def transaction_end(self, request):
print 'transaction_end'
return
@openerp.addons.web.http.jsonrequest
def cashier_mode_activated(self, request):
print 'cashier_mode_activated'
return
@openerp.addons.web.http.jsonrequest
def cashier_mode_deactivated(self, request):
print 'cashier_mode_deactivated'
return
@openerp.addons.web.http.jsonrequest
def open_cashbox(self, request):
print 'open_cashbox'
return
@openerp.addons.web.http.jsonrequest
def print_receipt(self, request, receipt):
print 'print_receipt' + str(receipt)
return
@openerp.addons.web.http.jsonrequest
def print_pdf_invoice(self, request, pdfinvoice):
print 'print_pdf_invoice' + str(pdfinvoice)
return
| agpl-3.0 |
midospan/profitpy | profit/workbench/tickerplotdisplay.py | 18 | 4333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <[email protected]>
# Distributed under the terms of the GNU General Public License v2
from PyQt4.QtCore import QVariant, pyqtSignature
from PyQt4.QtGui import QFrame
from profit.lib import Settings, Signals
from profit.lib.widgets.plot import Plot
from profit.workbench.widgets.ui_tickerplotdisplay import Ui_TickerPlotDisplay
class TickerPlotDisplay(QFrame, Ui_TickerPlotDisplay):
""" Combines one or more plot widgets into a single display.
"""
def __init__(self, parent=None):
""" Constructor.
@param parent ancestor object
"""
QFrame.__init__(self, parent)
self.setupUi(self)
self.sessionArgs = None
self.plotWidgets = []
def addPlot(self):
plot = Plot()
splitter = self.plotSplitter
widgets = self.plotWidgets
after = -1
try:
sender = self.sender().parent()
except (AttributeError, ):
pass
else:
plots = [(splitter.widget(i), i) for i in range(splitter.count())]
try:
after = 1 + dict(plots)[sender]
except (KeyError, ):
pass
widgets.append(plot)
self.reconfigurePlots()
self.connect(plot.actionNewPlot, Signals.triggered, self.addPlot)
self.connect(plot.actionClosePlot, Signals.triggered, self.closePlot)
self.connect(plot.actionSyncWithData, Signals.triggeredBool,
plot.syncPlot)
if self.sessionArgs:
session, collection, tickerId, args = self.sessionArgs
plot.setSessionPlot(session, collection, tickerId, *args)
splitter.insertWidget(after, plot)
plot.show()
def closePlot(self):
try:
plot = self.sender().parent()
except (AttributeError, ):
pass
else:
if plot in self.plotWidgets:
self.plotWidgets.remove(plot)
plot.close()
self.reconfigurePlots()
def reconfigurePlots(self):
self.setActionsEnabled()
self.reconnectSplitters()
self.renamePlots()
self.saveCount()
def reconnectSplitters(self):
connect = self.connect
disconnect = self.disconnect
widgets = self.plotWidgets
signal = Signals.splitterMoved
for widget in widgets:
wsplit = widget.plotSplitter
for other in [w for w in widgets if w is not widget]:
disconnect(wsplit, signal, other.plotSplitter.moveSplitter)
disconnect(other.plotSplitter, signal, wsplit.moveSplitter)
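# Below, only the first plot's splitter is reconnected to drive the others,
# so dragging it keeps every plot's panes aligned without wiring each pair
# of plots to one another.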
if widgets:
first, others = widgets[0], widgets[1:]
fsplit = first.plotSplitter
for other in others:
connect(fsplit, signal, other.plotSplitter.moveSplitter)
def renamePlots(self):
for index, plot in enumerate(self.plotWidgets):
plot.setObjectName('indexPlot%s' % index)
def saveCount(self):
settings = Settings()
settings.beginGroup('Plots')
settings.beginGroup('%s' % self.sessionArgs[1].__class__.__name__)
settings.setValue('displaycount', len(self.plotWidgets))
def setActionsEnabled(self):
single = len(self.plotWidgets) < 2
maxed = len(self.plotWidgets) > 5
for plot in self.plotWidgets:
plot.actionClosePlot.setEnabled(not single)
plot.actionNewPlot.setEnabled(not maxed)
def setSessionPlot(self, session, collection, tickerId, *args):
""" Associate a session with this instance.
@param session Session instance
@param tickerId id of ticker as integer
@param *args unused
@return None
"""
self.sessionArgs = (session, collection, tickerId, args)
if not self.plotWidgets:
settings = Settings()
settings.beginGroup('Plots')
settings.beginGroup('%s' % tickerId)
#count = settings.value('displaycount', QVariant(1)).toInt()[0]
count = 1
for i in range(count):
self.addPlot()
else:
for plot in self.plotWidgets:
plot.setSessionPlot(session, collection, tickerId, *args)
| gpl-2.0 |
proxysh/Safejumper-for-Mac | buildlinux/env64/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/ec.py | 13 | 11124 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
InvalidSignature, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.openssl.utils import (
_calculate_digest_and_algorithm, _truncate_digest
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
AsymmetricSignatureContext, AsymmetricVerificationContext, ec
)
def _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend):
"""
This function truncates digests that are longer than a given elliptic
curve key's length so they can be signed. Since elliptic curve keys are
much shorter than RSA keys, many digests (e.g. SHA-512) may require
truncation.
"""
_lib = backend._lib
_ffi = backend._ffi
group = _lib.EC_KEY_get0_group(ec_key_cdata)
with backend._tmp_bn_ctx() as bn_ctx:
order = _lib.BN_CTX_get(bn_ctx)
backend.openssl_assert(order != _ffi.NULL)
res = _lib.EC_GROUP_get_order(group, order, bn_ctx)
backend.openssl_assert(res == 1)
order_bits = _lib.BN_num_bits(order)
return _truncate_digest(digest, order_bits)
def _check_signature_algorithm(signature_algorithm):
if not isinstance(signature_algorithm, ec.ECDSA):
raise UnsupportedAlgorithm(
"Unsupported elliptic curve signature algorithm.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
def _ec_key_curve_sn(backend, ec_key):
group = backend._lib.EC_KEY_get0_group(ec_key)
backend.openssl_assert(group != backend._ffi.NULL)
nid = backend._lib.EC_GROUP_get_curve_name(group)
# The following check is to find EC keys with unnamed curves and raise
# an error for now.
if nid == backend._lib.NID_undef:
raise NotImplementedError(
"ECDSA certificates with unnamed curves are unsupported "
"at this time"
)
curve_name = backend._lib.OBJ_nid2sn(nid)
backend.openssl_assert(curve_name != backend._ffi.NULL)
sn = backend._ffi.string(curve_name).decode('ascii')
return sn
def _mark_asn1_named_ec_curve(backend, ec_cdata):
"""
Set the named curve flag on the EC_KEY. This causes OpenSSL to
serialize EC keys along with their curve OID which makes
deserialization easier.
"""
backend._lib.EC_KEY_set_asn1_flag(
ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE
)
def _sn_to_elliptic_curve(backend, sn):
try:
return ec._CURVE_TYPES[sn]()
except KeyError:
raise UnsupportedAlgorithm(
"{0} is not a supported elliptic curve".format(sn),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
def _ecdsa_sig_sign(backend, private_key, data):
max_size = backend._lib.ECDSA_size(private_key._ec_key)
backend.openssl_assert(max_size > 0)
sigbuf = backend._ffi.new("unsigned char[]", max_size)
siglen_ptr = backend._ffi.new("unsigned int[]", 1)
res = backend._lib.ECDSA_sign(
0, data, len(data), sigbuf, siglen_ptr, private_key._ec_key
)
backend.openssl_assert(res == 1)
return backend._ffi.buffer(sigbuf)[:siglen_ptr[0]]
def _ecdsa_sig_verify(backend, public_key, signature, data):
res = backend._lib.ECDSA_verify(
0, data, len(data), signature, len(signature), public_key._ec_key
)
if res != 1:
backend._consume_errors()
raise InvalidSignature
return True
@utils.register_interface(AsymmetricSignatureContext)
class _ECDSASignatureContext(object):
def __init__(self, backend, private_key, algorithm):
self._backend = backend
self._private_key = private_key
self._digest = hashes.Hash(algorithm, backend)
def update(self, data):
self._digest.update(data)
def finalize(self):
digest = self._digest.finalize()
digest = _truncate_digest_for_ecdsa(
self._private_key._ec_key, digest, self._backend
)
return _ecdsa_sig_sign(self._backend, self._private_key, digest)
@utils.register_interface(AsymmetricVerificationContext)
class _ECDSAVerificationContext(object):
def __init__(self, backend, public_key, signature, algorithm):
self._backend = backend
self._public_key = public_key
self._signature = signature
self._digest = hashes.Hash(algorithm, backend)
def update(self, data):
self._digest.update(data)
def verify(self):
digest = self._digest.finalize()
digest = _truncate_digest_for_ecdsa(
self._public_key._ec_key, digest, self._backend
)
return _ecdsa_sig_verify(
self._backend, self._public_key, self._signature, digest
)
@utils.register_interface(ec.EllipticCurvePrivateKeyWithSerialization)
class _EllipticCurvePrivateKey(object):
def __init__(self, backend, ec_key_cdata, evp_pkey):
self._backend = backend
_mark_asn1_named_ec_curve(backend, ec_key_cdata)
self._ec_key = ec_key_cdata
self._evp_pkey = evp_pkey
sn = _ec_key_curve_sn(backend, ec_key_cdata)
self._curve = _sn_to_elliptic_curve(backend, sn)
curve = utils.read_only_property("_curve")
def signer(self, signature_algorithm):
_check_signature_algorithm(signature_algorithm)
return _ECDSASignatureContext(
self._backend, self, signature_algorithm.algorithm
)
def exchange(self, algorithm, peer_public_key):
if not (
self._backend.elliptic_curve_exchange_algorithm_supported(
algorithm, self.curve
)
):
raise UnsupportedAlgorithm(
"This backend does not support the ECDH algorithm.",
_Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM
)
if peer_public_key.curve.name != self.curve.name:
raise ValueError(
"peer_public_key and self are not on the same curve"
)
group = self._backend._lib.EC_KEY_get0_group(self._ec_key)
z_len = (self._backend._lib.EC_GROUP_get_degree(group) + 7) // 8
self._backend.openssl_assert(z_len > 0)
z_buf = self._backend._ffi.new("uint8_t[]", z_len)
peer_key = self._backend._lib.EC_KEY_get0_public_key(
peer_public_key._ec_key
)
r = self._backend._lib.ECDH_compute_key(
z_buf, z_len, peer_key, self._ec_key, self._backend._ffi.NULL
)
self._backend.openssl_assert(r > 0)
return self._backend._ffi.buffer(z_buf)[:z_len]
def public_key(self):
group = self._backend._lib.EC_KEY_get0_group(self._ec_key)
self._backend.openssl_assert(group != self._backend._ffi.NULL)
curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group)
public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid)
self._backend.openssl_assert(public_ec_key != self._backend._ffi.NULL)
public_ec_key = self._backend._ffi.gc(
public_ec_key, self._backend._lib.EC_KEY_free
)
point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
self._backend.openssl_assert(point != self._backend._ffi.NULL)
res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point)
self._backend.openssl_assert(res == 1)
evp_pkey = self._backend._ec_cdata_to_evp_pkey(public_ec_key)
return _EllipticCurvePublicKey(self._backend, public_ec_key, evp_pkey)
def private_numbers(self):
bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key)
private_value = self._backend._bn_to_int(bn)
return ec.EllipticCurvePrivateNumbers(
private_value=private_value,
public_numbers=self.public_key().public_numbers()
)
def private_bytes(self, encoding, format, encryption_algorithm):
return self._backend._private_key_bytes(
encoding,
format,
encryption_algorithm,
self._evp_pkey,
self._ec_key
)
def sign(self, data, signature_algorithm):
_check_signature_algorithm(signature_algorithm)
data, algorithm = _calculate_digest_and_algorithm(
self._backend, data, signature_algorithm._algorithm
)
data = _truncate_digest_for_ecdsa(
self._ec_key, data, self._backend
)
return _ecdsa_sig_sign(self._backend, self, data)
@utils.register_interface(ec.EllipticCurvePublicKeyWithSerialization)
class _EllipticCurvePublicKey(object):
def __init__(self, backend, ec_key_cdata, evp_pkey):
self._backend = backend
_mark_asn1_named_ec_curve(backend, ec_key_cdata)
self._ec_key = ec_key_cdata
self._evp_pkey = evp_pkey
sn = _ec_key_curve_sn(backend, ec_key_cdata)
self._curve = _sn_to_elliptic_curve(backend, sn)
curve = utils.read_only_property("_curve")
def verifier(self, signature, signature_algorithm):
if not isinstance(signature, bytes):
raise TypeError("signature must be bytes.")
_check_signature_algorithm(signature_algorithm)
return _ECDSAVerificationContext(
self._backend, self, signature, signature_algorithm.algorithm
)
def public_numbers(self):
set_func, get_func, group = (
self._backend._ec_key_determine_group_get_set_funcs(self._ec_key)
)
point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
self._backend.openssl_assert(point != self._backend._ffi.NULL)
with self._backend._tmp_bn_ctx() as bn_ctx:
bn_x = self._backend._lib.BN_CTX_get(bn_ctx)
bn_y = self._backend._lib.BN_CTX_get(bn_ctx)
res = get_func(group, point, bn_x, bn_y, bn_ctx)
self._backend.openssl_assert(res == 1)
x = self._backend._bn_to_int(bn_x)
y = self._backend._bn_to_int(bn_y)
return ec.EllipticCurvePublicNumbers(
x=x,
y=y,
curve=self._curve
)
def public_bytes(self, encoding, format):
if format is serialization.PublicFormat.PKCS1:
raise ValueError(
"EC public keys do not support PKCS1 serialization"
)
return self._backend._public_key_bytes(
encoding,
format,
self,
self._evp_pkey,
None
)
def verify(self, signature, data, signature_algorithm):
_check_signature_algorithm(signature_algorithm)
data, algorithm = _calculate_digest_and_algorithm(
self._backend, data, signature_algorithm._algorithm
)
data = _truncate_digest_for_ecdsa(
self._ec_key, data, self._backend
)
return _ecdsa_sig_verify(self._backend, self, signature, data)
| gpl-2.0 |
hdinsight/hue | apps/oozie/src/oozie/migrations/0017_auto__add_bundledcoordinator__add_bundle.py | 39 | 25408 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BundledCoordinator'
db.create_table('oozie_bundledcoordinator', (
('coordinator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oozie.Coordinator'])),
('parameters', self.gf('django.db.models.fields.TextField')(default='[]')),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('bundle', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['oozie.Bundle'])),
))
db.send_create_signal('oozie', ['BundledCoordinator'])
# Adding model 'Bundle'
db.create_table('oozie_bundle', (
('kick_off_time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 2, 13, 22, 26, 34, 626668))),
('job_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Job'], unique=True, primary_key=True)),
))
db.send_create_signal('oozie', ['Bundle'])
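# The two create_table() calls above correspond roughly to the following
# DDL (illustrative only; South emits backend-specific SQL, and column
# types vary by database):
#   CREATE TABLE oozie_bundledcoordinator (
#       id <serial> PRIMARY KEY,
#       coordinator_id <int> REFERENCES oozie_coordinator (job_ptr_id),
#       bundle_id <int> REFERENCES oozie_bundle (job_ptr_id),
#       parameters text NOT NULL);
#   CREATE TABLE oozie_bundle (
#       job_ptr_id <int> PRIMARY KEY REFERENCES oozie_job (id),
#       kick_off_time <datetime> NOT NULL);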
def backwards(self, orm):
# Deleting model 'BundledCoordinator'
db.delete_table('oozie_bundledcoordinator')
# Deleting model 'Bundle'
db.delete_table('oozie_bundle')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.bundle': {
'Meta': {'object_name': 'Bundle', '_ormbases': ['oozie.Job']},
'coordinators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['oozie.Coordinator']", 'through': "orm['oozie.BundledCoordinator']", 'symmetrical': 'False'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'kick_off_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 22, 26, 34, 626668)'})
},
'oozie.bundledcoordinator': {
'Meta': {'object_name': 'BundledCoordinator'},
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Bundle']"}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 16, 22, 26, 34, 624131)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 22, 26, 34, 624101)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 22, 26, 34, 624810)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
| apache-2.0 |
rht/zulip | zerver/management/commands/send_custom_email.py | 1 | 4907 | import hashlib
import shutil
import subprocess
from argparse import ArgumentParser
from typing import Any, Dict, List
from zerver.lib.management import CommandError, ZulipBaseCommand
from zerver.lib.send_email import FromAddress, send_email
from zerver.models import UserProfile
from zerver.templatetags.app_filters import render_markdown_path
def send_custom_email(users: List[UserProfile], options: Dict[str, Any]) -> None:
"""
    Can be used directly from a management shell with
send_custom_email(user_profile_list, dict(
markdown_template_path="/path/to/markdown/file.md",
subject="Email Subject",
from_name="Sender Name")
)
"""
with open(options["markdown_template_path"], "r") as f:
email_template_hash = hashlib.sha256(f.read().encode('utf-8')).hexdigest()[0:32]
email_id = "zerver/emails/custom_email_%s" % (email_template_hash,)
markdown_email_base_template_path = "templates/zerver/emails/custom_email_base.pre.html"
html_source_template_path = "templates/%s.source.html" % (email_id,)
plain_text_template_path = "templates/%s.txt" % (email_id,)
subject_path = "templates/%s.subject.txt" % (email_id,)
# First, we render the markdown input file just like our
# user-facing docs with render_markdown_path.
shutil.copyfile(options['markdown_template_path'], plain_text_template_path)
rendered_input = render_markdown_path(plain_text_template_path.replace("templates/", ""))
# And then extend it with our standard email headers.
with open(html_source_template_path, "w") as f:
with open(markdown_email_base_template_path, "r") as base_template:
# Note that we're doing a hacky non-Jinja2 substitution here;
# we do this because the normal render_markdown_path ordering
# doesn't commute properly with inline-email-css.
f.write(base_template.read().replace('{{ rendered_input }}',
rendered_input))
with open(subject_path, "w") as f:
f.write(options["subject"])
# Then, we compile the email template using inline-email-css to
# add our standard styling to the paragraph tags (etc.).
#
# TODO: Ideally, we'd just refactor inline-email-css to
# compile this one template, not all of them.
subprocess.check_call(["./scripts/setup/inline-email-css"])
# Finally, we send the actual emails.
for user_profile in users:
context = {
'realm_uri': user_profile.realm.uri,
'realm_name': user_profile.realm.name,
}
send_email(email_id, to_user_ids=[user_profile.id],
from_address=FromAddress.SUPPORT,
reply_to_email=options.get("reply_to"),
from_name=options["from_name"], context=context)
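# Illustrative usage sketch (not part of the original module; the path and
# subject below are hypothetical). The Command class defined next would
# typically be driven from the command line roughly like:
#
#   ./manage.py send_custom_email --entire-server \
#       --markdown-template-path /tmp/announcement.md \
#       --subject "Scheduled maintenance" \
#       --from-name "Zulip Team"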
class Command(ZulipBaseCommand):
help = """Send email to specified email address."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('--entire-server', action="store_true", default=False,
help="Send to every user on the server. ")
parser.add_argument('--markdown-template-path', '--path',
dest='markdown_template_path',
required=True,
type=str,
help='Path to a markdown-format body for the email')
parser.add_argument('--subject',
required=True,
type=str,
help='Subject line for the email')
parser.add_argument('--from-name',
required=True,
type=str,
help='From line for the email')
parser.add_argument('--reply-to',
type=str,
help='Optional reply-to line for the email')
self.add_user_list_args(parser,
help="Email addresses of user(s) to send emails to.",
all_users_help="Send to every user on the realm.")
self.add_realm_args(parser)
def handle(self, *args: Any, **options: str) -> None:
if options["entire_server"]:
users = UserProfile.objects.filter(is_active=True, is_bot=False,
is_mirror_dummy=False)
else:
realm = self.get_realm(options)
try:
users = self.get_users(options, realm, is_bot=False)
except CommandError as error:
if str(error) == "You have to pass either -u/--users or -a/--all-users.":
raise CommandError("You have to pass -u/--users or -a/--all-users or --entire-server.")
raise error
send_custom_email(users, options)
| apache-2.0 |
belokop/indico_bare | indico/modules/events/registration/controllers/management/tickets.py | 1 | 5344 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from io import BytesIO
import qrcode
from flask import flash, json, redirect
from werkzeug.exceptions import Forbidden, NotFound
from indico.core.db import db
from indico.modules.events.registration.controllers.display import RHRegistrationFormRegistrationBase
from indico.modules.events.registration.controllers.management import RHManageRegFormBase
from indico.modules.events.registration.forms import TicketsForm
from indico.modules.events.registration.models.registrations import RegistrationState
from indico.modules.events.registration.views import WPManageRegistration
from indico.modules.oauth.models.applications import OAuthApplication
from indico.util.date_time import format_date
from indico.util.i18n import _
from indico.web.flask.util import url_for, send_file, secure_filename
from MaKaC.PDFinterface.conference import TicketToPDF
from MaKaC.common import Config
class RHRegistrationFormTickets(RHManageRegFormBase):
"""Display and modify ticket settings."""
def _check_ticket_app_enabled(self):
config = Config.getInstance()
checkin_app_client_id = config.getCheckinAppClientId()
if checkin_app_client_id is None:
flash(_("indico-checkin client_id is not defined in the Indico configuration"), 'warning')
return False
checkin_app = OAuthApplication.find_first(client_id=checkin_app_client_id)
if checkin_app is None:
flash(_("indico-checkin is not registered as an OAuth application with client_id {}")
.format(checkin_app_client_id), 'warning')
return False
return True
def _process(self):
form = TicketsForm(obj=self.regform)
if form.validate_on_submit():
form.populate_obj(self.regform)
db.session.flush()
return redirect(url_for('.tickets', self.regform))
return WPManageRegistration.render_template('management/regform_tickets.html', self.event,
regform=self.regform, form=form,
can_enable_tickets=self._check_ticket_app_enabled())
def generate_ticket(registration):
pdf = TicketToPDF(registration.registration_form.event, registration)
return BytesIO(pdf.getPDFBin())
class RHTicketDownload(RHRegistrationFormRegistrationBase):
"""Generate ticket for a given registration"""
def _checkParams(self, params):
RHRegistrationFormRegistrationBase._checkParams(self, params)
if not self.registration:
raise NotFound
def _checkProtection(self):
RHRegistrationFormRegistrationBase._checkProtection(self)
if self.registration.state != RegistrationState.complete:
raise Forbidden
if not self.regform.tickets_enabled:
raise Forbidden
if not self.regform.ticket_on_event_page and not self.regform.ticket_on_summary_page:
raise Forbidden
def _process(self):
filename = secure_filename('{}-Ticket.pdf'.format(self.event_new.title), 'ticket.pdf')
return send_file(filename, generate_ticket(self.registration), 'application/pdf')
class RHTicketConfigQRCode(RHManageRegFormBase):
"""Display configuration QRCode."""
def _process(self):
config = Config.getInstance()
        # QRCode (Version 6 with error correction M can contain up to 106 bytes)
qr = qrcode.QRCode(
version=6,
error_correction=qrcode.constants.ERROR_CORRECT_M,
box_size=4,
border=1
)
checkin_app_client_id = config.getCheckinAppClientId()
checkin_app = OAuthApplication.find_first(client_id=checkin_app_client_id)
base_url = config.getBaseSecureURL() if config.getBaseSecureURL() else config.getBaseURL()
qr_data = {
"event_id": self._conf.getId(),
"title": self._conf.getTitle(),
"date": format_date(self._conf.getAdjustedStartDate()),
"server": {
"baseUrl": base_url,
"consumerKey": checkin_app.client_id,
"auth_url": url_for('oauth.oauth_authorize', _external=True),
"token_url": url_for('oauth.oauth_token', _external=True)
}
}
json_qr_data = json.dumps(qr_data)
qr.add_data(json_qr_data)
qr.make(fit=True)
qr_img = qr.make_image()
output = BytesIO()
qr_img.save(output)
output.seek(0)
return send_file('config.png', output, 'image/png')
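# Illustrative sketch (values below are hypothetical, not taken from a real
# server): the JSON payload embedded in the configuration QR code built above
# has roughly this shape, which the indico-checkin mobile app decodes:
#
#   {"event_id": "1234", "title": "Example Conference", "date": "1 Jan 2016",
#    "server": {"baseUrl": "https://indico.example.org",
#               "consumerKey": "<checkin app client_id>",
#               "auth_url": "https://indico.example.org/oauth/authorize",
#               "token_url": "https://indico.example.org/oauth/token"}}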
| gpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/django/views/generic/detail.py | 78 | 5572 | from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, View
class SingleObjectMixin(object):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
slug_url_kwarg = 'slug'
pk_url_kwarg = 'pk'
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get(self.pk_url_kwarg, None)
slug = self.kwargs.get(self.slug_url_kwarg, None)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError(u"Generic detail view %s must be called with "
u"either an object pk or a slug."
% self.__class__.__name__)
try:
obj = queryset.get()
except ObjectDoesNotExist:
raise Http404(_(u"No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
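    # Illustrative sketch (not part of Django): URLconf entries that would
    # supply the ``pk`` or ``slug`` keyword argument consumed above, assuming
    # a hypothetical ``AuthorDetail(DetailView)`` subclass:
    #
    #   url(r'^authors/(?P<pk>\d+)/$', AuthorDetail.as_view()),
    #   url(r'^authors/(?P<slug>[\w-]+)/$', AuthorDetail.as_view()),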
def get_queryset(self):
"""
Get the queryset to look an object up against. May not be called if
`get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(u"%(cls)s is missing a queryset. Define "
u"%(cls)s.model, %(cls)s.queryset, or override "
u"%(cls)s.get_object()." % {
'cls': self.__class__.__name__
})
return self.queryset._clone()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif hasattr(obj, '_meta'):
return smart_str(obj._meta.object_name.lower())
else:
return None
def get_context_data(self, **kwargs):
context = kwargs
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class BaseDetailView(SingleObjectMixin, View):
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if get_template is overridden.
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if hasattr(self.object, '_meta'):
names.append("%s/%s%s.html" % (
self.object._meta.app_label,
self.object._meta.object_name.lower(),
self.template_name_suffix
))
elif hasattr(self, 'model') and hasattr(self.model, '_meta'):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
self.template_name_suffix
))
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
| agpl-3.0 |
2014c2g2/2014c2 | wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/test/testmock/testpatch.py | 739 | 53126 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
NonCallableMock, CallableMixin, patch, sentinel,
MagicMock, Mock, NonCallableMagicMock, patch, _patch,
DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
            self.assertIsInstance(mock1, MagicMock,
                                  "patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
            self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(Something, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
        foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
    def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
        self.assertIsNotNone(holder.exc_info[1],
                             'exception value not propagated')
        self.assertIsNotNone(holder.exc_info[2],
                             'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexit')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
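        # stopall() only reverts patches activated via start(); the decorator patch on os.path stays active until patched() returns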
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
marc-sensenich/ansible | lib/ansible/modules/cloud/cloudstack/cs_host.py | 13 | 18375 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_host
short_description: Manages hosts on Apache CloudStack based clouds.
description:
- Create, update and remove hosts.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the host.
required: true
aliases: [ 'ip_address' ]
url:
description:
- Url of the host used to create a host.
      - If not provided, C(http://) and the C(name) parameter are used as the url.
- Only considered if C(state=present) and host does not yet exist.
username:
description:
- Username for the host.
- Required if C(state=present) and host does not yet exist.
password:
description:
- Password for the host.
- Required if C(state=present) and host does not yet exist.
pod:
description:
- Name of the pod.
- Required if C(state=present) and host does not yet exist.
cluster:
description:
- Name of the cluster.
hypervisor:
description:
      - Name of the hypervisor.
- Required if C(state=present) and host does not yet exist.
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator' ]
allocation_state:
description:
- Allocation state of the host.
choices: [ 'enabled', 'disabled' ]
host_tags:
description:
- Tags of the host.
aliases: [ host_tag ]
state:
description:
- State of the host.
default: 'present'
choices: [ 'present', 'absent' ]
zone:
description:
- Name of the zone in which the host should be deployed.
- If not set, default zone is used.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Ensure a host is present but disabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
cluster: vcenter.example.com/ch-zrh-ix/pod01-cluster01
pod: pod01
zone: ch-zrh-ix-01
hypervisor: VMware
allocation_state: disabled
host_tags:
- perf
- gpu
- name: Ensure an existing host is disabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: disabled
- name: Ensure an existing host is enabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: enabled
- name: Ensure a host is absent
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
capabilities:
description: Capabilities of the host.
returned: success
type: str
sample: hvm
cluster:
description: Cluster of the host.
returned: success
type: str
sample: vcenter.example.com/zone/cluster01
cluster_type:
description: Type of the cluster of the host.
returned: success
type: str
sample: ExternalManaged
cpu_allocated:
description: Amount in percent of the host's CPU currently allocated.
returned: success
type: str
sample: 166.25%
cpu_number:
description: Number of CPUs of the host.
returned: success
type: str
sample: 24
cpu_sockets:
description: Number of CPU sockets of the host.
returned: success
type: int
sample: 2
cpu_speed:
description: CPU speed in Mhz
returned: success
type: int
sample: 1999
cpu_used:
description: Amount of the host's CPU currently used.
returned: success
type: str
sample: 33.6%
cpu_with_overprovisioning:
description: Amount of the host's CPU after applying the cpu.overprovisioning.factor.
returned: success
type: str
sample: 959520.0
created:
description: Date when the host was created.
returned: success
type: str
sample: 2015-05-03T15:05:51+0200
disconnected:
description: Date when the host was disconnected.
returned: success
type: str
sample: 2015-05-03T15:05:51+0200
disk_size_allocated:
description: Host's currently allocated disk size.
returned: success
type: int
sample: 2593
disk_size_total:
description: Total disk size of the host
returned: success
type: int
sample: 259300
events:
description: Events available for the host
returned: success
type: str
sample: "Ping; HostDown; AgentConnected; AgentDisconnected; PingTimeout; ShutdownRequested; Remove; StartAgentRebalance; ManagementServerDown"
ha_host:
description: Whether the host is a HA host.
returned: success
type: bool
sample: false
has_enough_capacity:
description: Whether the host has enough CPU and RAM capacity to migrate a VM to it.
returned: success
type: bool
sample: true
host_tags:
description: Comma-separated list of tags for the host.
returned: success
type: str
sample: "perf"
hypervisor:
description: Host's hypervisor.
returned: success
type: str
sample: VMware
hypervisor_version:
description: Hypervisor version.
returned: success
type: str
sample: 5.1
ip_address:
description: IP address of the host
returned: success
type: str
sample: 10.10.10.1
is_local_storage_active:
description: Whether the local storage is available or not.
returned: success
type: bool
sample: false
last_pinged:
description: Date and time the host was last pinged.
returned: success
type: str
sample: "1970-01-17T17:27:32+0100"
management_server_id:
description: Management server ID of the host.
returned: success
type: int
sample: 345050593418
memory_allocated:
description: Amount of the host's memory currently allocated.
returned: success
type: int
sample: 69793218560
memory_total:
description: Total of memory of the host.
returned: success
type: int
sample: 206085263360
memory_used:
description: Amount of the host's memory currently used.
returned: success
type: int
sample: 65504776192
name:
description: Name of the host.
returned: success
type: str
sample: esx32.example.com
network_kbs_read:
description: Incoming network traffic on the host.
returned: success
type: int
sample: 0
network_kbs_write:
description: Outgoing network traffic on the host.
returned: success
type: int
sample: 0
os_category:
description: OS category name of the host.
returned: success
type: str
sample: ...
out_of_band_management:
description: Host out-of-band management information.
returned: success
type: str
sample: ...
pod:
description: Pod name of the host.
returned: success
type: str
sample: Pod01
removed:
description: Date and time the host was removed.
returned: success
type: str
sample: "1970-01-17T17:27:32+0100"
resource_state:
description: Resource state of the host.
returned: success
type: str
sample: Enabled
allocation_state:
description: Allocation state of the host.
returned: success
type: str
sample: enabled
state:
description: State of the host.
returned: success
type: str
sample: Up
suitable_for_migration:
description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM
to it or not.
returned: success
type: str
sample: true
host_type:
description: Type of the host.
returned: success
type: str
sample: Routing
host_version:
description: Version of the host.
returned: success
type: str
sample: 4.5.2
gpu_group:
description: GPU cards present in the host.
returned: success
type: list
sample: []
zone:
description: Zone of the host.
returned: success
type: str
sample: zone01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
CS_HYPERVISORS
)
import time
class AnsibleCloudStackHost(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackHost, self).__init__(module)
self.returns = {
'averageload': 'average_load',
'capabilities': 'capabilities',
'clustername': 'cluster',
'clustertype': 'cluster_type',
'cpuallocated': 'cpu_allocated',
'cpunumber': 'cpu_number',
'cpusockets': 'cpu_sockets',
'cpuspeed': 'cpu_speed',
'cpuused': 'cpu_used',
'cpuwithoverprovisioning': 'cpu_with_overprovisioning',
'disconnected': 'disconnected',
'details': 'details',
'disksizeallocated': 'disk_size_allocated',
'disksizetotal': 'disk_size_total',
'events': 'events',
'hahost': 'ha_host',
'hasenoughcapacity': 'has_enough_capacity',
'hypervisor': 'hypervisor',
'hypervisorversion': 'hypervisor_version',
'ipaddress': 'ip_address',
'islocalstorageactive': 'is_local_storage_active',
'lastpinged': 'last_pinged',
'managementserverid': 'management_server_id',
'memoryallocated': 'memory_allocated',
'memorytotal': 'memory_total',
'memoryused': 'memory_used',
'networkkbsread': 'network_kbs_read',
'networkkbswrite': 'network_kbs_write',
'oscategoryname': 'os_category',
'outofbandmanagement': 'out_of_band_management',
'podname': 'pod',
'removed': 'removed',
'resourcestate': 'resource_state',
'suitableformigration': 'suitable_for_migration',
'type': 'host_type',
'version': 'host_version',
'gpugroup': 'gpu_group',
}
# States only usable by the updateHost API
self.allocation_states_for_update = {
'enabled': 'Enable',
'disabled': 'Disable',
}
self.host = None
def get_pod(self, key=None):
pod_name = self.module.params.get('pod')
if not pod_name:
return None
args = {
'name': pod_name,
'zoneid': self.get_zone(key='id'),
}
pods = self.query_api('listPods', **args)
if pods:
return self._get_by_key(key, pods['pod'][0])
self.module.fail_json(msg="Pod %s not found" % pod_name)
def get_cluster(self, key=None):
cluster_name = self.module.params.get('cluster')
if not cluster_name:
return None
args = {
'name': cluster_name,
'zoneid': self.get_zone(key='id'),
}
clusters = self.query_api('listClusters', **args)
if clusters:
return self._get_by_key(key, clusters['cluster'][0])
self.module.fail_json(msg="Cluster %s not found" % cluster_name)
def get_host_tags(self):
host_tags = self.module.params.get('host_tags')
if host_tags is None:
return None
return ','.join(host_tags)
def get_host(self, refresh=False):
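        # Return the cached host unless a refresh is requested; hosts are matched by name or IP address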
if self.host is not None and not refresh:
return self.host
name = self.module.params.get('name')
args = {
'zoneid': self.get_zone(key='id'),
'fetch_list': True,
}
res = self.query_api('listHosts', **args)
if res:
for h in res:
if name in [h['ipaddress'], h['name']]:
self.host = h
return self.host
def _handle_allocation_state(self, host):
allocation_state = self.module.params.get('allocation_state')
if not allocation_state:
return host
host = self._set_host_allocation_state(host)
# In case host in maintenance and target is maintenance
if host['allocationstate'].lower() == allocation_state and allocation_state == 'maintenance':
return host
# Cancel maintenance if target state is enabled/disabled
elif allocation_state in list(self.allocation_states_for_update.keys()):
host = self.disable_maintenance(host)
host = self._update_host(host, self.allocation_states_for_update[allocation_state])
# Only an enabled host can put in maintenance
elif allocation_state == 'maintenance':
host = self._update_host(host, 'Enable')
host = self.enable_maintenance(host)
return host
def _set_host_allocation_state(self, host):
if host is None:
            host = {'allocationstate': 'Enable'}
# Set host allocationstate to be disabled/enabled
elif host['resourcestate'].lower() in list(self.allocation_states_for_update.keys()):
host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]
else:
host['allocationstate'] = host['resourcestate']
return host
def present_host(self):
host = self.get_host()
if not host:
host = self._create_host(host)
else:
host = self._update_host(host)
if host:
host = self._handle_allocation_state(host)
return host
def _get_url(self):
url = self.module.params.get('url')
if url:
return url
else:
return "http://%s" % self.module.params.get('name')
def _create_host(self, host):
required_params = [
'password',
'username',
'hypervisor',
'pod',
]
self.module.fail_on_missing_params(required_params=required_params)
self.result['changed'] = True
args = {
'hypervisor': self.module.params.get('hypervisor'),
'url': self._get_url(),
'username': self.module.params.get('username'),
'password': self.module.params.get('password'),
'podid': self.get_pod(key='id'),
'zoneid': self.get_zone(key='id'),
'clusterid': self.get_cluster(key='id'),
'hosttags': self.get_host_tags(),
}
if not self.module.check_mode:
host = self.query_api('addHost', **args)
host = host['host'][0]
return host
def _update_host(self, host, allocation_state=None):
args = {
'id': host['id'],
'hosttags': self.get_host_tags(),
'allocationstate': allocation_state,
}
if allocation_state is not None:
host = self._set_host_allocation_state(host)
if self.has_changed(args, host):
self.result['changed'] = True
if not self.module.check_mode:
host = self.query_api('updateHost', **args)
host = host['host']
return host
def absent_host(self):
host = self.get_host()
if host:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.enable_maintenance(host)
if res:
res = self.query_api('deleteHost', **args)
return host
def enable_maintenance(self, host):
if host['resourcestate'] not in ['PrepareForMaintenance', 'Maintenance']:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.query_api('prepareHostForMaintenance', **args)
self.poll_job(res, 'host')
host = self._poll_for_maintenance()
return host
def disable_maintenance(self, host):
if host['resourcestate'] in ['PrepareForMaintenance', 'Maintenance']:
self.result['changed'] = True
args = {
'id': host['id'],
}
if not self.module.check_mode:
res = self.query_api('cancelHostMaintenance', **args)
host = self.poll_job(res, 'host')
return host
def _poll_for_maintenance(self):
for i in range(0, 300):
time.sleep(2)
host = self.get_host(refresh=True)
if not host:
return None
elif host['resourcestate'] != 'PrepareForMaintenance':
return host
self.fail_json(msg="Polling for maintenance timed out")
def get_result(self, host):
super(AnsibleCloudStackHost, self).get_result(host)
if host:
self.result['allocation_state'] = host['resourcestate'].lower()
self.result['host_tags'] = host['hosttags'].split(',') if host.get('hosttags') else []
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True, aliases=['ip_address']),
url=dict(),
password=dict(no_log=True),
username=dict(),
hypervisor=dict(choices=CS_HYPERVISORS),
allocation_state=dict(choices=['enabled', 'disabled', 'maintenance']),
pod=dict(),
cluster=dict(),
host_tags=dict(type='list', aliases=['host_tag']),
zone=dict(),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_host = AnsibleCloudStackHost(module)
state = module.params.get('state')
if state == 'absent':
host = acs_host.absent_host()
else:
host = acs_host.present_host()
result = acs_host.get_result(host)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
captiosus/treadmill | treadmill/cli/admin/cloud.py | 1 | 24095 | import os
import click
from pprint import pprint
import logging
from treadmill.infra import constants, connection, vpc, subnet
from treadmill.infra.setup import ipa, ldap, node, cell
from treadmill.infra.utils import security_group, hosted_zones
from treadmill.infra.utils import mutually_exclusive_option, cli_callbacks
_LOGGER = logging.getLogger(__name__)
_OPTIONS_FILE = 'manifest'
def init():
"""Cloud CLI module"""
@click.group()
@click.option('--domain', required=True,
envvar='TREADMILL_DNS_DOMAIN',
callback=cli_callbacks.validate_domain,
help='Domain for hosted zone')
@click.pass_context
def cloud(ctx, domain):
"""Manage Treadmill on cloud"""
ctx.obj['DOMAIN'] = domain
@cloud.group()
def configure():
"""Configure Treadmill EC2 Objects"""
pass
@configure.command(name='vpc')
@click.option('--region', help='Region for the vpc')
@click.option('--vpc-cidr-block', default='172.23.0.0/16',
show_default=True,
help='CIDR block for the vpc')
@click.option('--secgroup_name', default='sg_common',
show_default=True,
help='Security group name')
@click.option(
'--secgroup_desc',
default='Treadmill Security Group',
show_default=True,
help='Description for the security group'
)
@click.option(
'--name',
required=True,
help='VPC name',
callback=cli_callbacks.validate_vpc_name
)
@click.option('-m', '--' + _OPTIONS_FILE,
cls=mutually_exclusive_option.MutuallyExclusiveOption,
mutually_exclusive=['region',
'vpc_cidr_block',
'secgroup_desc',
'secgroup_name',
'name'],
help="Options YAML file. ")
@click.pass_context
def configure_vpc(ctx, region, vpc_cidr_block,
secgroup_name, secgroup_desc,
name, manifest):
"""Configure Treadmill VPC"""
domain = ctx.obj['DOMAIN']
if region:
connection.Connection.context.region_name = region
connection.Connection.context.domain = domain
_vpc = vpc.VPC.setup(
name=name,
cidr_block=vpc_cidr_block,
secgroup_name=secgroup_name,
secgroup_desc=secgroup_desc
)
click.echo(
pprint(_vpc.show())
)
@configure.command(name='ldap')
@click.option('--vpc-name', 'vpc_id',
required=True,
callback=cli_callbacks.convert_to_vpc_id,
help='VPC name')
@click.option('--region', help='Region for the vpc')
@click.option('--key', required=True, help='SSH Key Name')
@click.option('--name', required=True, help='LDAP Instance Name')
@click.option('--image', required=True,
help='Image to use for instances e.g. RHEL-7.4')
@click.option('--instance-type',
default=constants.INSTANCE_TYPES['EC2']['micro'],
show_default=True,
help='AWS ec2 instance type')
@click.option('--tm-release',
callback=cli_callbacks.current_release_version,
help='Treadmill release to use')
@click.option('--app-root', default='/var/tmp',
show_default=True,
help='Treadmill app root')
@click.option('--ldap-cidr-block', default='172.23.1.0/24',
show_default=True,
help='CIDR block for LDAP')
@click.option('--ldap-subnet-id', help='Subnet ID for LDAP')
@click.option('--cell-subnet-id', help='Subnet ID of Cell',
required=True)
@click.option('--ipa-admin-password',
callback=cli_callbacks.ipa_password_prompt,
envvar='TREADMILL_IPA_ADMIN_PASSWORD',
help='Password for IPA admin')
@click.option('-m', '--' + _OPTIONS_FILE,
cls=mutually_exclusive_option.MutuallyExclusiveOption,
mutually_exclusive=['region',
'vpc_name',
'key',
'name',
'image',
'instance_type',
'tm_release',
'app_root',
'ldap_subnet_id',
'cell_subnet_id',
                                      'ipa_admin_password',
'ldap_cidr_block'],
help="Options YAML file. ")
@click.pass_context
def configure_ldap(ctx, vpc_id, region, key, name, image,
instance_type, tm_release, app_root,
ldap_cidr_block, ldap_subnet_id, cell_subnet_id,
ipa_admin_password, manifest):
"""Configure Treadmill LDAP"""
domain = ctx.obj['DOMAIN']
if region:
connection.Connection.context.region_name = region
connection.Connection.context.domain = domain
_ldap = ldap.LDAP(
name=name,
vpc_id=vpc_id,
)
_ldap.setup(
key=key,
count=1,
image=image,
instance_type=instance_type,
tm_release=tm_release,
app_root=app_root,
cidr_block=ldap_cidr_block,
cell_subnet_id=cell_subnet_id,
subnet_id=ldap_subnet_id,
ipa_admin_password=ipa_admin_password,
)
click.echo(
pprint(_ldap.subnet.show())
)
@configure.command(name='cell')
@click.option('--vpc-name', 'vpc_id',
required=True,
callback=cli_callbacks.convert_to_vpc_id,
help='VPC Name')
@click.option('--region', help='Region for the vpc')
@click.option('--name', default='TreadmillMaster',
show_default=True,
help='Treadmill master name')
@click.option('--key', required=True, help='SSH Key Name')
@click.option('--count', default='3', type=int,
show_default=True,
help='Number of Treadmill masters to spin up')
@click.option('--image', required=True,
help='Image to use for new instances e.g. RHEL-7.4')
@click.option('--instance-type',
default=constants.INSTANCE_TYPES['EC2']['micro'],
show_default=True,
help='AWS ec2 instance type')
@click.option('--tm-release',
callback=cli_callbacks.current_release_version,
help='Treadmill release to use')
@click.option('--app-root', default='/var/tmp',
show_default=True,
help='Treadmill app root')
@click.option('--cell-cidr-block', default='172.23.0.0/24',
show_default=True,
help='CIDR block for the cell')
@click.option('--ldap-cidr-block', default='172.23.1.0/24',
show_default=True,
help='CIDR block for LDAP')
@click.option('--subnet-id', help='Subnet ID')
@click.option('--ldap-subnet-id',
help='Subnet ID for LDAP')
@click.option('--without-ldap', required=False, is_flag=True,
show_default=True,
default=False, help='Flag for LDAP Server')
@click.option('--ipa-admin-password',
callback=cli_callbacks.ipa_password_prompt,
envvar='TREADMILL_IPA_ADMIN_PASSWORD',
help='Password for IPA admin')
@click.option('-m', '--' + _OPTIONS_FILE,
cls=mutually_exclusive_option.MutuallyExclusiveOption,
mutually_exclusive=['region',
'vpc_name',
'name',
'key',
'count',
'image',
'instance_type',
'tm_release',
'app_root',
                                      'cell_cidr_block',
'ldap_subnet_id',
'subnet_id',
'ipa_admin_password',
'without_ldap',
'ldap_cidr_block'],
help="Options YAML file. ")
@click.pass_context
def configure_cell(ctx, vpc_id, region, name, key, count, image,
instance_type, tm_release, app_root,
cell_cidr_block, ldap_cidr_block,
subnet_id, ldap_subnet_id,
without_ldap, ipa_admin_password, manifest):
"""Configure Treadmill Cell"""
domain = ctx.obj['DOMAIN']
if region:
connection.Connection.context.region_name = region
connection.Connection.context.domain = domain
_cell = cell.Cell(
vpc_id=vpc_id,
subnet_id=subnet_id,
)
result = {}
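        # Provision LDAP first (unless --without-ldap), then the Zookeeper ensemble, then the Treadmill masters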
if not without_ldap:
_ldap = ldap.LDAP(
name='TreadmillLDAP',
vpc_id=vpc_id,
)
_ldap.setup(
key=key,
count=1,
image=image,
instance_type=instance_type,
tm_release=tm_release,
app_root=app_root,
cidr_block=ldap_cidr_block,
cell_subnet_id=_cell.id,
subnet_id=ldap_subnet_id,
ipa_admin_password=ipa_admin_password,
)
result['Ldap'] = _ldap.subnet.show()
_cell.setup_zookeeper(
name='TreadmillZookeeper',
key=key,
count=count,
image=image,
instance_type=instance_type,
subnet_cidr_block=cell_cidr_block,
ipa_admin_password=ipa_admin_password
)
_cell.setup_master(
name=name,
key=key,
count=count,
image=image,
instance_type=instance_type,
tm_release=tm_release,
app_root=app_root,
subnet_cidr_block=cell_cidr_block,
ipa_admin_password=ipa_admin_password
)
result['Cell'] = _cell.show()
click.echo(
pprint(result)
)
@configure.command(name='domain')
@click.option('--name', default='TreadmillIPA',
show_default=True,
help='Name of the instance')
@click.option('--region', help='Region for the vpc')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.option('--subnet-cidr-block', help='Cidr block of subnet for IPA',
show_default=True,
default='172.23.2.0/24')
@click.option('--subnet-id', help='Subnet ID')
@click.option('--count', help='Count of the instances',
show_default=True,
default=1)
@click.option('--ipa-admin-password',
callback=cli_callbacks.validate_ipa_password,
envvar='TREADMILL_IPA_ADMIN_PASSWORD',
help='Password for IPA admin')
@click.option('--tm-release',
callback=cli_callbacks.current_release_version,
help='Treadmill Release')
@click.option('--key', required=True, help='SSH key name')
@click.option('--instance-type',
default=constants.INSTANCE_TYPES['EC2']['medium'],
show_default=True,
help='Instance type')
@click.option('--image', required=True,
help='Image to use for new master instance e.g. RHEL-7.4')
@click.option('-m', '--' + _OPTIONS_FILE,
cls=mutually_exclusive_option.MutuallyExclusiveOption,
mutually_exclusive=['region',
'vpc_id',
'name',
'key',
'count',
'image',
'instance_type',
'tm_release',
                                      'subnet_cidr_block',
'subnet_id',
'ipa_admin_password'],
help="Options YAML file. ")
@click.pass_context
def configure_domain(ctx, name, region, vpc_id,
subnet_cidr_block, subnet_id,
count, ipa_admin_password, tm_release, key,
instance_type, image, manifest):
"""Configure Treadmill Domain (IPA)"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
if region:
connection.Connection.context.region_name = region
if not ipa_admin_password:
ipa_admin_password = os.environ.get(
'TREADMILL_IPA_ADMIN_PASSWORD',
click.prompt(
'Create IPA admin password ',
hide_input=True,
confirmation_prompt=True
)
)
_ipa = ipa.IPA(name=name, vpc_id=vpc_id)
_ipa.setup(
subnet_id=subnet_id,
count=count,
ipa_admin_password=ipa_admin_password,
tm_release=tm_release,
key=key,
instance_type=instance_type,
image=image,
cidr_block=subnet_cidr_block,
)
click.echo(
pprint(_ipa.show())
)
@configure.command(name='node')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.option('--region', help='Region for the vpc')
@click.option('--name', default='TreadmillNode',
show_default=True,
help='Node name')
@click.option('--key', required=True, help='SSH Key Name')
@click.option('--image', required=True,
help='Image to use for new node instance e.g. RHEL-7.4')
@click.option('--instance-type',
default=constants.INSTANCE_TYPES['EC2']['large'],
show_default=True,
help='AWS ec2 instance type')
@click.option('--tm-release',
callback=cli_callbacks.current_release_version,
help='Treadmill release to use')
@click.option('--app-root', default='/var/tmp/treadmill-node',
show_default=True,
help='Treadmill app root')
@click.option('--subnet-id', required=True, help='Subnet ID')
@click.option('--ipa-admin-password',
callback=cli_callbacks.ipa_password_prompt,
envvar='TREADMILL_IPA_ADMIN_PASSWORD',
help='Password for IPA admin')
@click.option('--with-api', required=False, is_flag=True,
show_default=True,
default=False, help='Provision node with Treadmill APIs')
@click.option('-m', '--' + _OPTIONS_FILE,
cls=mutually_exclusive_option.MutuallyExclusiveOption,
mutually_exclusive=['region',
'vpc_name',
'name',
'key',
'image',
'instance_type',
'tm_release',
'app_root',
'subnet_id',
                                      'ipa_admin_password',
'with_api'],
help="Options YAML file. ")
@click.pass_context
def configure_node(ctx, vpc_id, region, name, key, image,
instance_type, tm_release, app_root,
subnet_id, ipa_admin_password, with_api, manifest):
"""Configure new Node in Cell"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
if region:
connection.Connection.context.region_name = region
if not ipa_admin_password:
ipa_admin_password = os.environ.get(
'TREADMILL_IPA_ADMIN_PASSWORD',
click.prompt('IPA admin password ', hide_input=True)
)
_node = node.Node(name, vpc_id)
_node.setup(
key=key,
image=image,
instance_type=instance_type,
tm_release=tm_release,
app_root=app_root,
subnet_id=subnet_id,
ipa_admin_password=ipa_admin_password,
with_api=with_api,
)
click.echo(
pprint(_node.subnet.show())
)
@cloud.group()
def delete():
"""Delete Treadmill EC2 Objects"""
pass
@delete.command(name='vpc')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.pass_context
def delete_vpc(ctx, vpc_id):
"""Delete VPC"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
vpc.VPC(id=vpc_id).delete()
@delete.command(name='cell')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.option('--subnet-id', required=True, help='Subnet ID of cell')
@click.pass_context
def delete_cell(ctx, vpc_id, subnet_id):
"""Delete Cell (Subnet)"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
subnet.Subnet(id=subnet_id).destroy()
@delete.command(name='domain')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.option('--subnet-id', required=True, help='Subnet ID of IPA')
@click.option('--name', help='Name of Instance',
show_default=True,
default="TreadmillIPA")
@click.pass_context
def delete_domain(ctx, vpc_id, subnet_id, name):
"""Delete IPA"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
_ipa = ipa.IPA(name=name, vpc_id=vpc_id)
_ipa.destroy(subnet_id=subnet_id)
@delete.command(name='ldap')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.option('--subnet-id', help='Subnet ID of LDAP')
@click.option('--name', help='Name of Instance',
show_default=True,
default="TreadmillLDAP")
@click.pass_context
def delete_ldap(ctx, vpc_id, subnet_id, name):
"""Delete LDAP"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
_ldap = ldap.LDAP(name=name, vpc_id=vpc_id)
_ldap.destroy(subnet_id=subnet_id)
@delete.command(name='node')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
required=True, help='VPC Name')
@click.option('--name', help='Instance Name', required=False)
@click.option('--instance-id', help='Instance ID', required=False)
@click.pass_context
def delete_node(ctx, vpc_id, name, instance_id):
"""Delete Node"""
domain = ctx.obj['DOMAIN']
if not name and not instance_id:
            _LOGGER.error('Provide either --name or --instance-id of '
'Node Instance and try again.')
return
connection.Connection.context.domain = domain
_node = node.Node(name=name, vpc_id=vpc_id)
_node.destroy(instance_id=instance_id)
@cloud.group('list')
def _list():
"""Show Treadmill Cloud Resources"""
pass
@_list.command(name='vpc')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
help='VPC Name')
@click.pass_context
def vpc_resources(ctx, vpc_id):
"""Show VPC(s)"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
if vpc_id:
result = pprint(vpc.VPC(id=vpc_id).show())
click.echo(result)
else:
_vpcs = vpc.VPC.all()
result = list(map(lambda v: {'id': v.id, 'name': v.name}, _vpcs))
click.echo({'Vpcs': result})
@_list.command(name='cell')
@click.option('--vpc-name', 'vpc_id',
callback=cli_callbacks.convert_to_vpc_id,
help='VPC Name')
@click.option('--subnet-id', help='Subnet ID of cell')
@click.pass_context
def cell_resources(ctx, vpc_id, subnet_id):
"""Show Cell"""
domain = ctx.obj['DOMAIN']
connection.Connection.context.domain = domain
if subnet_id:
click.echo(
pprint(
subnet.Subnet(id=subnet_id).show()
)
)
return
if vpc_id:
vpcs = [vpc_id]
else:
vpcs = [_vpc.id for _vpc in vpc.VPC.all()]
result = []
for v in vpcs:
subnets = vpc.VPC(id=v).list_cells()
if subnets:
result.append({
'VpcId': v,
'Subnets': subnets
})
click.echo(pprint(result))
@cloud.group()
def port():
"""enable/disable EC2 instance port"""
pass
@port.command(name='enable')
@click.option(
'-a', '--anywhere', is_flag=True,
default=True,
show_default=True,
help='From Anywhere?'
)
@click.option('--protocol', help='Protocol',
show_default=True,
default='tcp')
@click.option('-p', '--port', required=True, help='Port')
@click.option('-s', '--security-group-id', required=True,
help='Security Group ID')
def enable_port(security_group_id, port, protocol, anywhere):
"""Enable Port from my ip"""
security_group.enable(port, security_group_id, protocol, anywhere)
@port.command(name='disable')
@click.option(
'-a', '--anywhere',
is_flag=True,
default=True,
show_default=True,
help='From Anywhere?'
)
@click.option('--protocol', help='Protocol',
show_default=True,
default='tcp')
@click.option('-p', '--port', required=True, help='Port')
@click.option('-s', '--security-group-id', required=True,
help='Security Group ID')
def disable_port(security_group_id, port, protocol, anywhere):
"""Disable Port from my ip"""
security_group.disable(port, security_group_id, protocol, anywhere)
@cloud.command(name='delete-hosted-zone')
@click.option('--zones-to-retain', required=True,
help='Hosted Zone IDs to retain', multiple=True)
def delete_hosted_zones(zones_to_retain):
"""Delete Hosted Zones"""
hosted_zones.delete_obsolete(zones_to_retain)
return cloud
| apache-2.0 |
cheif/django-rest-framework | tests/test_routers.py | 79 | 13237 | from __future__ import unicode_literals
from collections import namedtuple
from django.conf.urls import include, url
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.test import TestCase
from rest_framework import permissions, serializers, viewsets
from rest_framework.decorators import detail_route, list_route
from rest_framework.response import Response
from rest_framework.routers import DefaultRouter, SimpleRouter
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
class RouterTestModel(models.Model):
uuid = models.CharField(max_length=20)
text = models.CharField(max_length=200)
class NoteSerializer(serializers.HyperlinkedModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='routertestmodel-detail', lookup_field='uuid')
class Meta:
model = RouterTestModel
fields = ('url', 'uuid', 'text')
class NoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
serializer_class = NoteSerializer
lookup_field = 'uuid'
class KWargedNoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
serializer_class = NoteSerializer
lookup_field = 'text__contains'
lookup_url_kwarg = 'text'
class MockViewSet(viewsets.ModelViewSet):
queryset = None
serializer_class = None
notes_router = SimpleRouter()
notes_router.register(r'notes', NoteViewSet)
kwarged_notes_router = SimpleRouter()
kwarged_notes_router.register(r'notes', KWargedNoteViewSet)
namespaced_router = DefaultRouter()
namespaced_router.register(r'example', MockViewSet, base_name='example')
urlpatterns = [
url(r'^non-namespaced/', include(namespaced_router.urls)),
url(r'^namespaced/', include(namespaced_router.urls, namespace='example')),
url(r'^example/', include(notes_router.urls)),
url(r'^example2/', include(kwarged_notes_router.urls)),
]
class BasicViewSet(viewsets.ViewSet):
def list(self, request, *args, **kwargs):
return Response({'method': 'list'})
@detail_route(methods=['post'])
def action1(self, request, *args, **kwargs):
return Response({'method': 'action1'})
@detail_route(methods=['post'])
def action2(self, request, *args, **kwargs):
return Response({'method': 'action2'})
@detail_route(methods=['post', 'delete'])
def action3(self, request, *args, **kwargs):
return Response({'method': 'action2'})
@detail_route()
def link1(self, request, *args, **kwargs):
return Response({'method': 'link1'})
@detail_route()
def link2(self, request, *args, **kwargs):
return Response({'method': 'link2'})
class TestSimpleRouter(TestCase):
def setUp(self):
self.router = SimpleRouter()
def test_link_and_action_decorator(self):
routes = self.router.get_routes(BasicViewSet)
decorator_routes = routes[2:]
# Make sure all these endpoints exist and none have been clobbered
for i, endpoint in enumerate(['action1', 'action2', 'action3', 'link1', 'link2']):
route = decorator_routes[i]
# check url listing
self.assertEqual(route.url,
'^{{prefix}}/{{lookup}}/{0}{{trailing_slash}}$'.format(endpoint))
# check method to function mapping
if endpoint == 'action3':
methods_map = ['post', 'delete']
elif endpoint.startswith('action'):
methods_map = ['post']
else:
methods_map = ['get']
for method in methods_map:
self.assertEqual(route.mapping[method], endpoint)
class TestRootView(TestCase):
urls = 'tests.test_routers'
def test_retrieve_namespaced_root(self):
response = self.client.get('/namespaced/')
self.assertEqual(
response.data,
{
"example": "http://testserver/namespaced/example/",
}
)
def test_retrieve_non_namespaced_root(self):
response = self.client.get('/non-namespaced/')
self.assertEqual(
response.data,
{
"example": "http://testserver/non-namespaced/example/",
}
)
class TestCustomLookupFields(TestCase):
"""
Ensure that custom lookup fields are correctly routed.
"""
urls = 'tests.test_routers'
def setUp(self):
RouterTestModel.objects.create(uuid='123', text='foo bar')
def test_custom_lookup_field_route(self):
detail_route = notes_router.urls[-1]
detail_url_pattern = detail_route.regex.pattern
self.assertIn('<uuid>', detail_url_pattern)
def test_retrieve_lookup_field_list_view(self):
response = self.client.get('/example/notes/')
self.assertEqual(
response.data,
[{
"url": "http://testserver/example/notes/123/",
"uuid": "123", "text": "foo bar"
}]
)
def test_retrieve_lookup_field_detail_view(self):
response = self.client.get('/example/notes/123/')
self.assertEqual(
response.data,
{
"url": "http://testserver/example/notes/123/",
"uuid": "123", "text": "foo bar"
}
)
class TestLookupValueRegex(TestCase):
"""
Ensure the router honors lookup_value_regex when applied
to the viewset.
"""
def setUp(self):
class NoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
lookup_field = 'uuid'
lookup_value_regex = '[0-9a-f]{32}'
self.router = SimpleRouter()
self.router.register(r'notes', NoteViewSet)
self.urls = self.router.urls
def test_urls_limited_by_lookup_value_regex(self):
expected = ['^notes/$', '^notes/(?P<uuid>[0-9a-f]{32})/$']
for idx in range(len(expected)):
self.assertEqual(expected[idx], self.urls[idx].regex.pattern)
class TestLookupUrlKwargs(TestCase):
"""
Ensure the router honors lookup_url_kwarg.
Setup a deep lookup_field, but map it to a simple URL kwarg.
"""
urls = 'tests.test_routers'
def setUp(self):
RouterTestModel.objects.create(uuid='123', text='foo bar')
def test_custom_lookup_url_kwarg_route(self):
detail_route = kwarged_notes_router.urls[-1]
detail_url_pattern = detail_route.regex.pattern
self.assertIn('^notes/(?P<text>', detail_url_pattern)
def test_retrieve_lookup_url_kwarg_detail_view(self):
response = self.client.get('/example2/notes/fo/')
self.assertEqual(
response.data,
{
"url": "http://testserver/example/notes/123/",
"uuid": "123", "text": "foo bar"
}
)
class TestTrailingSlashIncluded(TestCase):
def setUp(self):
class NoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
self.router = SimpleRouter()
self.router.register(r'notes', NoteViewSet)
self.urls = self.router.urls
def test_urls_have_trailing_slash_by_default(self):
expected = ['^notes/$', '^notes/(?P<pk>[^/.]+)/$']
for idx in range(len(expected)):
self.assertEqual(expected[idx], self.urls[idx].regex.pattern)
class TestTrailingSlashRemoved(TestCase):
def setUp(self):
class NoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
self.router = SimpleRouter(trailing_slash=False)
self.router.register(r'notes', NoteViewSet)
self.urls = self.router.urls
def test_urls_can_have_trailing_slash_removed(self):
expected = ['^notes$', '^notes/(?P<pk>[^/.]+)$']
for idx in range(len(expected)):
self.assertEqual(expected[idx], self.urls[idx].regex.pattern)
class TestNameableRoot(TestCase):
def setUp(self):
class NoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
self.router = DefaultRouter()
self.router.root_view_name = 'nameable-root'
self.router.register(r'notes', NoteViewSet)
self.urls = self.router.urls
def test_router_has_custom_name(self):
expected = 'nameable-root'
self.assertEqual(expected, self.urls[0].name)
class TestActionKeywordArgs(TestCase):
"""
Ensure keyword arguments passed in the `@action` decorator
are properly handled. Refs #940.
"""
def setUp(self):
class TestViewSet(viewsets.ModelViewSet):
permission_classes = []
@detail_route(methods=['post'], permission_classes=[permissions.AllowAny])
def custom(self, request, *args, **kwargs):
return Response({
'permission_classes': self.permission_classes
})
self.router = SimpleRouter()
self.router.register(r'test', TestViewSet, base_name='test')
self.view = self.router.urls[-1].callback
def test_action_kwargs(self):
request = factory.post('/test/0/custom/')
response = self.view(request)
self.assertEqual(
response.data,
{'permission_classes': [permissions.AllowAny]}
)
class TestActionAppliedToExistingRoute(TestCase):
"""
    Ensure `@detail_route` decorator raises an exception when applied
to an existing route
"""
def test_exception_raised_when_action_applied_to_existing_route(self):
class TestViewSet(viewsets.ModelViewSet):
@detail_route(methods=['post'])
def retrieve(self, request, *args, **kwargs):
return Response({
'hello': 'world'
})
self.router = SimpleRouter()
self.router.register(r'test', TestViewSet, base_name='test')
with self.assertRaises(ImproperlyConfigured):
self.router.urls
class DynamicListAndDetailViewSet(viewsets.ViewSet):
def list(self, request, *args, **kwargs):
return Response({'method': 'list'})
@list_route(methods=['post'])
def list_route_post(self, request, *args, **kwargs):
return Response({'method': 'action1'})
@detail_route(methods=['post'])
def detail_route_post(self, request, *args, **kwargs):
return Response({'method': 'action2'})
@list_route()
def list_route_get(self, request, *args, **kwargs):
return Response({'method': 'link1'})
@detail_route()
def detail_route_get(self, request, *args, **kwargs):
return Response({'method': 'link2'})
@list_route(url_path="list_custom-route")
def list_custom_route_get(self, request, *args, **kwargs):
return Response({'method': 'link1'})
@detail_route(url_path="detail_custom-route")
def detail_custom_route_get(self, request, *args, **kwargs):
return Response({'method': 'link2'})
class SubDynamicListAndDetailViewSet(DynamicListAndDetailViewSet):
pass
class TestDynamicListAndDetailRouter(TestCase):
def setUp(self):
self.router = SimpleRouter()
def _test_list_and_detail_route_decorators(self, viewset):
routes = self.router.get_routes(viewset)
decorator_routes = [r for r in routes if not (r.name.endswith('-list') or r.name.endswith('-detail'))]
MethodNamesMap = namedtuple('MethodNamesMap', 'method_name url_path')
# Make sure all these endpoints exist and none have been clobbered
for i, endpoint in enumerate([MethodNamesMap('list_custom_route_get', 'list_custom-route'),
MethodNamesMap('list_route_get', 'list_route_get'),
MethodNamesMap('list_route_post', 'list_route_post'),
MethodNamesMap('detail_custom_route_get', 'detail_custom-route'),
MethodNamesMap('detail_route_get', 'detail_route_get'),
MethodNamesMap('detail_route_post', 'detail_route_post')
]):
route = decorator_routes[i]
# check url listing
method_name = endpoint.method_name
url_path = endpoint.url_path
if method_name.startswith('list_'):
self.assertEqual(route.url,
'^{{prefix}}/{0}{{trailing_slash}}$'.format(url_path))
else:
self.assertEqual(route.url,
'^{{prefix}}/{{lookup}}/{0}{{trailing_slash}}$'.format(url_path))
# check method to function mapping
if method_name.endswith('_post'):
method_map = 'post'
else:
method_map = 'get'
self.assertEqual(route.mapping[method_map], method_name)
def test_list_and_detail_route_decorators(self):
self._test_list_and_detail_route_decorators(DynamicListAndDetailViewSet)
def test_inherited_list_and_detail_route_decorators(self):
self._test_list_and_detail_route_decorators(SubDynamicListAndDetailViewSet)
| bsd-2-clause |
RAPD/RAPD | src/sites/detectors/lscat_dectris_eiger9m.py | 1 | 8852 | """
Detector description for LS-CAT Eiger 9M
Designed to read the CBF version of the Eiger file
"""
"""
This file is part of RAPD
Copyright (C) 2017, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2017-02-13"
_maintainer__ = "Frank Murphy"
__email__ = "[email protected]"
__status__ = "Development"
# Standard imports
import argparse
import os
import pprint
# numpy is required below by calculate_flux()
import numpy
# RAPD imports
# commandline_utils
# detectors.detector_utils as detector_utils
# utils
# Dectris Pilatus 6M
import detectors.dectris.dectris_eiger9m as detector
import detectors.detector_utils as utils
# Detector information
# The RAPD detector type
DETECTOR = "dectris_eiger9m"
# The detector vendor as it appears in the header
VENDORTYPE = "Eiger-9M"
# The detector serial number as it appears in the header
DETECTOR_SN = "Dectris Eiger 9M S/N E-18-0101"
# The detector suffix "" if there is no suffix
DETECTOR_SUFFIX = ".cbf"
# Is there a run number in the template?
RUN_NUMBER_IN_TEMPLATE = False
# Template for image name generation ? for frame number places
if RUN_NUMBER_IN_TEMPLATE:
#IMAGE_TEMPLATE = "%s.%03d_??????.cbf" # prefix & run number
IMAGE_TEMPLATE = "%s_%03d_??????.cbf" # prefix & run number
else:
IMAGE_TEMPLATE = "%s_??????.cbf" # prefix
# This is a version number for internal RAPD use
# If the header changes, increment this number
HEADER_VERSION = 1
# XDS information for constructing the XDS.INP file
# Import from more generic detector
XDS_FLIP_BEAM = detector.XDS_FLIP_BEAM
# Import from more generic detector
XDSINP0 = detector.XDSINP
# Update the XDS information from the imported detector
# only if there are differences or new keywords.
# The tuple should contain two items (key and value)
# ie. XDSINP1 = [("SEPMIN", "4"),]
XDSINP1 = [(),
]
XDSINP = utils.merge_xds_input(XDSINP0, XDSINP1)
def parse_file_name(fullname):
"""
Parse the fullname of an image and return
(directory, basename, prefix, run_number, image_number)
Keyword arguments
fullname -- the full path name of the image file
"""
# Directory of the file
directory = os.path.dirname(fullname)
# The basename of the file (i.e. basename - suffix)
basename = os.path.basename(fullname).rstrip(DETECTOR_SUFFIX)
# The prefix, image number, and run number
sbase = basename.split("_")
prefix = "_".join(sbase[0:-2])
image_number = int(sbase[-1])
if RUN_NUMBER_IN_TEMPLATE:
run_number = int(sbase[-2])
prefix = "_".join(sbase[0:-3])
else:
run_number = None
return directory, basename, prefix, run_number, image_number
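# A minimal sketch of the parse (hypothetical filename, assuming RUN_NUMBER_IN_TEMPLATE is False):
#   parse_file_name("/data/user/lyso_dtz_000123.cbf")
#   returns ("/data/user", "lyso_dtz_000123", "lyso", None, 123)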
def create_image_fullname(directory,
image_prefix,
run_number=None,
image_number=None):
"""
Create an image name from parts - the reverse of parse
Keyword arguments
directory -- in which the image file appears
image_prefix -- the prefix before run number or image number
run_number -- number for the run
image_number -- number for the image
"""
if RUN_NUMBER_IN_TEMPLATE:
filename = IMAGE_TEMPLATE.replace("??????", "%06d") % (image_prefix, run_number, image_number)
else:
filename = IMAGE_TEMPLATE.replace("??????", "%06d") % (image_prefix, image_number)
fullname = os.path.join(directory, filename)
return fullname
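# A minimal usage sketch (hypothetical values, assuming RUN_NUMBER_IN_TEMPLATE is False):
#   create_image_fullname("/data/user", "lysozyme", image_number=1)
#   returns "/data/user/lysozyme_000001.cbf"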
def create_image_template(image_prefix, run_number):
"""
Create an image template for XDS
"""
# print "create_image_template %s %d" % (image_prefix, run_number)
if RUN_NUMBER_IN_TEMPLATE:
image_template = IMAGE_TEMPLATE % (image_prefix, run_number)
else:
image_template = IMAGE_TEMPLATE % image_prefix
# print "image_template: %s" % image_template
return image_template
def calculate_flux(header, site_params):
"""
Calculate the flux as a function of transmission and aperture size.
"""
beam_size_x = site_params.get('BEAM_SIZE_X')
beam_size_y = site_params.get('BEAM_SIZE_Y')
aperture = header.get('md2_aperture')
new_x = beam_size_x
new_y = beam_size_y
if aperture < beam_size_x:
new_x = aperture
if aperture < beam_size_y:
new_y = aperture
# Calculate area of full beam used to calculate the beamline flux
# Assume ellipse, but same equation works for circle.
# Assume beam is uniform
full_beam_area = numpy.pi*(beam_size_x/2)*(beam_size_y/2)
# Calculate the new beam area (with aperture) divided by the full_beam_area.
    # Since the aperture is round, it clips the beam along x until it matches the beam height,
    # after which the illuminated area is treated as a circle.
if beam_size_y <= aperture:
# ellipse
ratio = (numpy.pi*(aperture/2)*(beam_size_y/2)) / full_beam_area
else:
# circle
ratio = (numpy.pi*(aperture/2)**2) / full_beam_area
# Calculate the new_beam_area ratio to full_beam_area
flux = int(round(site_params.get('BEAM_FLUX') * (header.get('transmission')/100) * ratio))
# Return the flux and beam size
return (flux, new_x, new_y)
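# Worked example with hypothetical numbers: for a 100x50 beam, a 50 aperture and transmission=10,
# the clipped area is pi*(50/2)*(50/2), half of the full pi*(100/2)*(50/2) ellipse, so the flux is
# BEAM_FLUX * 0.1 * 0.5 and the returned beam size is 50 x 50 (same units as the site parameters).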
def get_data_root_dir(fullname):
"""
Derive the data root directory from the user directory
The logic will most likely be unique for each site
Keyword arguments
fullname -- the full path name of the image file
"""
# Isolate distinct properties of the images path
path_split = fullname.split(os.path.sep)
data_root_dir = os.path.join("/", *path_split[1:3])
# Return the determined directory
return data_root_dir
def read_header(input_file=False, beam_settings=False, extra_header=False):
"""
Read header from image file and return dict
Keyword variables
fullname -- full path name of the image file to be read
beam_settings -- source information from site file
"""
# Perform the header read from the file
# If you are importing another detector, this should work
if input_file.endswith(".h5"):
header = utils.read_hdf5_header(input_file)
elif input_file.endswith(".cbf"):
header = detector.read_header(input_file)
basename = os.path.basename(input_file)
#header["image_prefix"] = ".".join(basename.replace(".cbf", "").split(".")[:-1])
header["image_prefix"] ="_".join(basename.replace(".cbf", "").split("_")[:-1])
# Add run_number (if used) and image template for processing
if RUN_NUMBER_IN_TEMPLATE:
#header["run_number"] = int(basename.replace(".cbf", "").split("_")[-1])
header["run_number"] = int(basename.replace(".cbf", "").split("_")[-2])
header["image_template"] = IMAGE_TEMPLATE % (header["image_prefix"], header["run_number"])
else:
header["run_number"] = None
header["image_template"] = IMAGE_TEMPLATE % header["image_prefix"]
# Add tag for module to header
header["rapd_detector_id"] = "lscat_dectris_eiger9m"
header["run_number_in_template"] = RUN_NUMBER_IN_TEMPLATE
# Return the header
return header
def get_commandline():
"""
Grabs the commandline
"""
print "get_commandline"
# Parse the commandline arguments
commandline_description = "Generate a generic RAPD file"
parser = argparse.ArgumentParser(description=commandline_description)
# File name to be generated
parser.add_argument(action="store",
dest="file",
nargs="?",
default=False,
help="Name of file to be generated")
return parser.parse_args()
def main(args):
"""
The main process docstring
This function is called when this module is invoked from
the commandline
"""
print "main"
if args.file:
test_image = os.path.abspath(args.file)
else:
        raise Exception("No test image input!")
# Read the header
if test_image.endswith(".h5"):
header = read_header(hdf5_file=test_image)
elif test_image.endswith(".cbf"):
header = read_header(cbf_file=test_image)
# And print it out
pprint.pprint(header)
if __name__ == "__main__":
# Get the commandline args
commandline_args = get_commandline()
# Execute code
main(args=commandline_args)
| agpl-3.0 |
cristiana214/cristianachavez214-cristianachavez | python/src/Lib/encodings/cp855.py | 593 | 34106 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u2116' # 0x00ef -> NUMERO SIGN
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
u'\xa7' # 0x00fd -> SECTION SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
aperigault/ansible | lib/ansible/module_utils/digital_ocean.py | 104 | 5769 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Ansible Project 2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
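# Response wraps the (resp, info) pair returned by fetch_url; its json property
# falls back to info['body'] because fetch_url typically reports HTTP error
# bodies through the info dict rather than through the response object.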
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(to_text(self.info["body"]))
return None
try:
return json.loads(to_text(self.body))
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class DigitalOceanHelper:
def __init__(self, module):
self.module = module
self.baseurl = 'https://api.digitalocean.com/v2'
self.timeout = module.params.get('timeout', 30)
self.oauth_token = module.params.get('oauth_token')
self.headers = {'Authorization': 'Bearer {0}'.format(self.oauth_token),
'Content-type': 'application/json'}
# Check if api_token is valid or not
response = self.get('account')
if response.status_code == 401:
self.module.fail_json(msg='Failed to login using API token, please verify validity of API token.')
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method, timeout=self.timeout)
return Response(resp, info)
def get(self, path, data=None):
return self.send('GET', path, data)
def put(self, path, data=None):
return self.send('PUT', path, data)
def post(self, path, data=None):
return self.send('POST', path, data)
def delete(self, path, data=None):
return self.send('DELETE', path, data)
@staticmethod
def digital_ocean_argument_spec():
return dict(
validate_certs=dict(type='bool', required=False, default=True),
oauth_token=dict(
no_log=True,
# Support environment variable for DigitalOcean OAuth Token
fallback=(env_fallback, ['DO_API_TOKEN', 'DO_API_KEY', 'DO_OAUTH_TOKEN', 'OAUTH_TOKEN']),
required=False,
aliases=['api_token'],
),
timeout=dict(type='int', default=30),
)
def get_paginated_data(self, base_url=None, data_key_name=None, data_per_page=40, expected_status_code=200):
"""
Function to get all paginated data from given URL
Args:
base_url: Base URL to get data from
data_key_name: Name of data key value
            data_per_page: Number of results per page (Default: 40)
expected_status_code: Expected returned code from DigitalOcean (Default: 200)
Returns: List of data
"""
page = 1
has_next = True
ret_data = []
status_code = None
response = None
while has_next or status_code != expected_status_code:
required_url = "{0}page={1}&per_page={2}".format(base_url, page, data_per_page)
response = self.get(required_url)
status_code = response.status_code
# stop if any error during pagination
if status_code != expected_status_code:
break
page += 1
ret_data.extend(response.json[data_key_name])
has_next = "pages" in response.json["links"] and "next" in response.json["links"]["pages"]
if status_code != expected_status_code:
msg = "Failed to fetch %s from %s" % (data_key_name, base_url)
if response:
msg += " due to error : %s" % response.json['message']
self.module.fail_json(msg=msg)
return ret_data
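# Illustrative sketch (not part of the original module utils): a hypothetical
# module listing droplets with the helper above; the 'droplets?' endpoint and
# the 'droplets' key are assumptions chosen for the example.
def _example_list_droplets(module):
    rest = DigitalOceanHelper(module)
    # get_paginated_data appends page=N&per_page=M and loops until
    # links.pages.next disappears from the response.
    return rest.get_paginated_data(base_url='droplets?', data_key_name='droplets')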
| gpl-3.0 |
Ander-Alvarez/android_kernel_motorola_msm8916 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
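# Example invocations (illustrative; assumes a perf.data recorded with the
# raw_syscalls:sys_enter tracepoint, e.g. 'perf record -e raw_syscalls:sys_enter -a'):
#
#   perf script -s sctop.py             # all comms, refresh every 3 seconds
#   perf script -s sctop.py firefox 5   # only 'firefox', refresh every 5 seconds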
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
hgl888/chromium-crosswalk | build/empty_google_play_services_lib.py | 7 | 2305 | #!/usr/bin/env python
# Copyright (c) 2015 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Create an empty google-play-services.jar inside src/third_party/android_tools.
https://chromium-review.googlesource.com/#/c/247861 has introduced a check in
android_tools.gyp that makes the gyp configuration process fail if a certain
file that is part of the Google Play Services library is not found.
Since installing that library involves manually accepting an EULA and it is not
used in Crosswalk, we create an empty file with the name android_tools.gyp
checks for so that the build configuration proceeds. If the user chooses to
manually install the library, the empty file is just overwritten and nothing
breaks. We also create res/ and src/ so that src/build/java.gypi
can call find(1) in those directories without failing.
"""
import errno
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), 'android'))
from pylib.constants import ANDROID_SDK_ROOT
def CreateDirectory(path):
"""
Creates |path| and all missing parent directories. Passing a directory that
already exists does not cause an error.
"""
try:
os.makedirs(path)
except OSError, e:
if e.errno == errno.EEXIST:
pass
def CreateFileIfMissing(path):
"""
Creates an empty file called |path| if it does not already exist.
"""
if os.path.isfile(path):
return
open(path, 'w').close()
if __name__ == '__main__':
# If ANDROID_SDK_ROOT does not exist, we can assume the android_tools
# repository has not been checked out. Consequently, this is not an Android
# build and we do not need to worry about the issue this script works around.
if not os.path.isdir(ANDROID_SDK_ROOT):
sys.exit(0)
google_play_lib_root = os.path.join(
ANDROID_SDK_ROOT, 'extras', 'google', 'google_play_services', 'libproject',
'google-play-services_lib')
CreateDirectory(os.path.join(google_play_lib_root, 'libs'))
CreateDirectory(os.path.join(google_play_lib_root, 'res'))
CreateDirectory(os.path.join(google_play_lib_root, 'src'))
CreateFileIfMissing(os.path.join(google_play_lib_root, 'libs',
'google-play-services.jar'))
| bsd-3-clause |
yrizk/django-blog | blogvenv/lib/python3.4/site-packages/setuptools/tests/test_easy_install.py | 71 | 19213 | # -*- coding: utf-8 -*-
"""Easy install Tests
"""
from __future__ import absolute_import
import sys
import os
import shutil
import tempfile
import site
import contextlib
import tarfile
import logging
import itertools
import distutils.errors
import pytest
try:
from unittest import mock
except ImportError:
import mock
from setuptools import sandbox
from setuptools import compat
from setuptools.compat import StringIO, BytesIO, urlparse
from setuptools.sandbox import run_setup
import setuptools.command.easy_install as ei
from setuptools.command.easy_install import PthDistributions
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
from pkg_resources import working_set
from pkg_resources import Distribution as PRDistribution
import setuptools.tests.server
import pkg_resources
from .py26compat import tarfile_open
from . import contexts
from .textwrap import DALS
class FakeDist(object):
def get_entry_map(self, group):
if group != 'console_scripts':
return {}
return {'name': 'ep'}
def as_requirement(self):
return 'spec'
SETUP_PY = DALS("""
from setuptools import setup
setup(name='foo')
""")
class TestEasyInstallTest:
def test_install_site_py(self):
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.sitepy_installed = False
cmd.install_dir = tempfile.mkdtemp()
try:
cmd.install_site_py()
sitepy = os.path.join(cmd.install_dir, 'site.py')
assert os.path.exists(sitepy)
finally:
shutil.rmtree(cmd.install_dir)
def test_get_script_args(self):
header = ei.CommandSpec.best().from_environment().as_header()
expected = header + DALS("""
# EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name'
__requires__ = 'spec'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('spec', 'console_scripts', 'name')()
)
""")
dist = FakeDist()
args = next(ei.ScriptWriter.get_args(dist))
name, script = itertools.islice(args, 2)
assert script == expected
def test_no_find_links(self):
# new option '--no-find-links', that blocks find-links added at
# the project level
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.no_find_links = True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
assert cmd.package_index.scanned_urls == {}
# let's try without it (default behavior)
cmd = ei.easy_install(dist)
cmd.check_pth_processing = lambda: True
cmd.find_links = ['link1', 'link2']
cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok')
cmd.args = ['ok']
cmd.ensure_finalized()
keys = sorted(cmd.package_index.scanned_urls.keys())
assert keys == ['link1', 'link2']
def test_write_exception(self):
"""
Test that `cant_write_to_target` is rendered as a DistutilsError.
"""
dist = Distribution()
cmd = ei.easy_install(dist)
cmd.install_dir = os.getcwd()
with pytest.raises(distutils.errors.DistutilsError):
cmd.cant_write_to_target()
class TestPTHFileWriter:
def test_add_from_cwd_site_sets_dirty(self):
        '''A pth file manager should set dirty
        if a distribution is in a site dir but is also the cwd.
        '''
pth = PthDistributions('does-not_exist', [os.getcwd()])
assert not pth.dirty
pth.add(PRDistribution(os.getcwd()))
assert pth.dirty
def test_add_from_site_is_ignored(self):
location = '/test/location/does-not-have-to-exist'
# PthDistributions expects all locations to be normalized
location = pkg_resources.normalize_path(location)
pth = PthDistributions('does-not_exist', [location, ])
assert not pth.dirty
pth.add(PRDistribution(location))
assert not pth.dirty
@pytest.yield_fixture
def setup_context(tmpdir):
with (tmpdir/'setup.py').open('w') as f:
f.write(SETUP_PY)
with tmpdir.as_cwd():
yield tmpdir
@pytest.mark.usefixtures("user_override")
@pytest.mark.usefixtures("setup_context")
class TestUserInstallTest:
@mock.patch('setuptools.command.easy_install.__file__', None)
def test_user_install_implied(self):
easy_install_pkg.__file__ = site.USER_SITE
site.ENABLE_USER_SITE = True # disabled sometimes
        #XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.args = ['py']
cmd.ensure_finalized()
assert cmd.user, 'user should be implied'
def test_multiproc_atexit(self):
try:
__import__('multiprocessing')
except ImportError:
# skip the test if multiprocessing is not available
return
log = logging.getLogger('test_easy_install')
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
log.info('this should not break')
def test_user_install_not_implied_without_usersite_enabled(self):
site.ENABLE_USER_SITE = False # usually enabled
        #XXX: replace with something meaningful
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.args = ['py']
cmd.initialize_options()
assert not cmd.user, 'NOT user should be implied'
def test_local_index(self):
# make sure the local index is used
# when easy_install looks for installed
# packages
new_location = tempfile.mkdtemp()
target = tempfile.mkdtemp()
egg_file = os.path.join(new_location, 'foo-1.0.egg-info')
with open(egg_file, 'w') as f:
f.write('Name: foo\n')
sys.path.append(target)
old_ppath = os.environ.get('PYTHONPATH')
os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path)
try:
dist = Distribution()
dist.script_name = 'setup.py'
cmd = ei.easy_install(dist)
cmd.install_dir = target
cmd.args = ['foo']
cmd.ensure_finalized()
cmd.local_index.scan([new_location])
res = cmd.easy_install('foo')
actual = os.path.normcase(os.path.realpath(res.location))
expected = os.path.normcase(os.path.realpath(new_location))
assert actual == expected
finally:
sys.path.remove(target)
for basedir in [new_location, target, ]:
if not os.path.exists(basedir) or not os.path.isdir(basedir):
continue
try:
shutil.rmtree(basedir)
except:
pass
if old_ppath is not None:
os.environ['PYTHONPATH'] = old_ppath
else:
del os.environ['PYTHONPATH']
@contextlib.contextmanager
def user_install_setup_context(self, *args, **kwargs):
"""
Wrap sandbox.setup_context to patch easy_install in that context to
appear as user-installed.
"""
with self.orig_context(*args, **kwargs):
import setuptools.command.easy_install as ei
ei.__file__ = site.USER_SITE
yield
def patched_setup_context(self):
self.orig_context = sandbox.setup_context
return mock.patch(
'setuptools.sandbox.setup_context',
self.user_install_setup_context,
)
def test_setup_requires(self):
"""Regression test for Distribute issue #318
Ensure that a package with setup_requires can be installed when
setuptools is installed in the user site-packages without causing a
SandboxViolation.
"""
test_pkg = create_setup_requires_package(os.getcwd())
test_setup_py = os.path.join(test_pkg, 'setup.py')
try:
with contexts.quiet():
with self.patched_setup_context():
run_setup(test_setup_py, ['install'])
except IndexError:
# Test fails in some cases due to bugs in Python
# See https://bitbucket.org/pypa/setuptools/issue/201
pass
@pytest.yield_fixture
def distutils_package():
distutils_setup_py = SETUP_PY.replace(
'from setuptools import setup',
'from distutils.core import setup',
)
with contexts.tempdir(cd=os.chdir):
with open('setup.py', 'w') as f:
f.write(distutils_setup_py)
yield
class TestDistutilsPackage:
def test_bdist_egg_available_on_distutils_pkg(self, distutils_package):
run_setup('setup.py', ['bdist_egg'])
class TestSetupRequires:
def test_setup_requires_honors_fetch_params(self):
"""
When easy_install installs a source distribution which specifies
setup_requires, it should honor the fetch parameters (such as
allow-hosts, index-url, and find-links).
"""
# set up a server which will simulate an alternate package index.
p_index = setuptools.tests.server.MockServer()
p_index.start()
netloc = 1
p_index_loc = urlparse(p_index.url)[netloc]
if p_index_loc.endswith(':0'):
# Some platforms (Jython) don't find a port to which to bind,
# so skip this test for them.
return
with contexts.quiet():
# create an sdist that has a build-time dependency.
with TestSetupRequires.create_sdist() as dist_file:
with contexts.tempdir() as temp_install_dir:
with contexts.environment(PYTHONPATH=temp_install_dir):
ei_params = [
'--index-url', p_index.url,
'--allow-hosts', p_index_loc,
'--exclude-scripts',
'--install-dir', temp_install_dir,
dist_file,
]
with contexts.argv(['easy_install']):
# attempt to install the dist. It should fail because
# it doesn't exist.
with pytest.raises(SystemExit):
easy_install_pkg.main(ei_params)
# there should have been two or three requests to the server
# (three happens on Python 3.3a)
assert 2 <= len(p_index.requests) <= 3
assert p_index.requests[0].path == '/does-not-exist/'
@staticmethod
@contextlib.contextmanager
def create_sdist():
"""
Return an sdist with a setup_requires dependency (of something that
doesn't exist)
"""
with contexts.tempdir() as dir:
dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz')
script = DALS("""
import setuptools
setuptools.setup(
name="setuptools-test-fetcher",
version="1.0",
setup_requires = ['does-not-exist'],
)
""")
make_trivial_sdist(dist_path, script)
yield dist_path
def test_setup_requires_overrides_version_conflict(self):
"""
Regression test for issue #323.
Ensures that a distribution's setup_requires requirements can still be
installed and used locally even if a conflicting version of that
requirement is already on the path.
"""
pr_state = pkg_resources.__getstate__()
fake_dist = PRDistribution('does-not-matter', project_name='foobar',
version='0.0')
working_set.add(fake_dist)
try:
with contexts.tempdir() as temp_dir:
test_pkg = create_setup_requires_package(temp_dir)
test_setup_py = os.path.join(test_pkg, 'setup.py')
with contexts.quiet() as (stdout, stderr):
# Don't even need to install the package, just
# running the setup.py at all is sufficient
run_setup(test_setup_py, ['--name'])
lines = stdout.readlines()
assert len(lines) > 0
                    assert lines[-1].strip() == 'test_pkg'
finally:
pkg_resources.__setstate__(pr_state)
def create_setup_requires_package(path):
"""Creates a source tree under path for a trivial test package that has a
single requirement in setup_requires--a tarball for that requirement is
also created and added to the dependency_links argument.
"""
test_setup_attrs = {
'name': 'test_pkg', 'version': '0.0',
'setup_requires': ['foobar==0.1'],
'dependency_links': [os.path.abspath(path)]
}
test_pkg = os.path.join(path, 'test_pkg')
test_setup_py = os.path.join(test_pkg, 'setup.py')
os.mkdir(test_pkg)
with open(test_setup_py, 'w') as f:
f.write(DALS("""
import setuptools
setuptools.setup(**%r)
""" % test_setup_attrs))
foobar_path = os.path.join(path, 'foobar-0.1.tar.gz')
make_trivial_sdist(
foobar_path,
DALS("""
import setuptools
setuptools.setup(
name='foobar',
version='0.1'
)
"""))
return test_pkg
def make_trivial_sdist(dist_path, setup_py):
"""Create a simple sdist tarball at dist_path, containing just a
setup.py, the contents of which are provided by the setup_py string.
"""
setup_py_file = tarfile.TarInfo(name='setup.py')
try:
# Python 3 (StringIO gets converted to io module)
MemFile = BytesIO
except AttributeError:
MemFile = StringIO
setup_py_bytes = MemFile(setup_py.encode('utf-8'))
setup_py_file.size = len(setup_py_bytes.getvalue())
with tarfile_open(dist_path, 'w:gz') as dist:
dist.addfile(setup_py_file, fileobj=setup_py_bytes)
class TestScriptHeader:
non_ascii_exe = '/Users/José/bin/python'
exe_with_spaces = r'C:\Program Files\Python33\python.exe'
@pytest.mark.skipif(
sys.platform.startswith('java') and ei.is_sh(sys.executable),
reason="Test cannot run under java when executable is sh"
)
def test_get_script_header(self):
expected = '#!%s\n' % ei.nt_quote_arg(os.path.normpath(sys.executable))
actual = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python')
assert actual == expected
expected = '#!%s -x\n' % ei.nt_quote_arg(os.path.normpath
(sys.executable))
actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x')
assert actual == expected
actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe)
expected = '#!%s -x\n' % self.non_ascii_exe
assert actual == expected
actual = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
executable='"'+self.exe_with_spaces+'"')
expected = '#!"%s"\n' % self.exe_with_spaces
assert actual == expected
@pytest.mark.xfail(
compat.PY3 and os.environ.get("LC_CTYPE") in ("C", "POSIX"),
reason="Test fails in this locale on Python 3"
)
@mock.patch.dict(sys.modules, java=mock.Mock(lang=mock.Mock(System=
mock.Mock(getProperty=mock.Mock(return_value="")))))
@mock.patch('sys.platform', 'java1.5.0_13')
def test_get_script_header_jython_workaround(self, tmpdir):
# Create a mock sys.executable that uses a shebang line
header = DALS("""
#!/usr/bin/python
# -*- coding: utf-8 -*-
""")
exe = tmpdir / 'exe.py'
with exe.open('w') as f:
f.write(header)
exe = str(exe)
header = ei.ScriptWriter.get_script_header('#!/usr/local/bin/python',
executable=exe)
assert header == '#!/usr/bin/env %s\n' % exe
expect_out = 'stdout' if sys.version_info < (2,7) else 'stderr'
with contexts.quiet() as (stdout, stderr):
# When options are included, generate a broken shebang line
# with a warning emitted
candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python -x',
executable=exe)
assert candidate == '#!%s -x\n' % exe
output = locals()[expect_out]
assert 'Unable to adapt shebang line' in output.getvalue()
with contexts.quiet() as (stdout, stderr):
candidate = ei.ScriptWriter.get_script_header('#!/usr/bin/python',
executable=self.non_ascii_exe)
assert candidate == '#!%s -x\n' % self.non_ascii_exe
output = locals()[expect_out]
assert 'Unable to adapt shebang line' in output.getvalue()
class TestCommandSpec:
def test_custom_launch_command(self):
"""
Show how a custom CommandSpec could be used to specify a #! executable
which takes parameters.
"""
cmd = ei.CommandSpec(['/usr/bin/env', 'python3'])
assert cmd.as_header() == '#!/usr/bin/env python3\n'
def test_from_param_for_CommandSpec_is_passthrough(self):
"""
from_param should return an instance of a CommandSpec
"""
cmd = ei.CommandSpec(['python'])
cmd_new = ei.CommandSpec.from_param(cmd)
assert cmd is cmd_new
def test_from_environment_with_spaces_in_executable(self):
with mock.patch('sys.executable', TestScriptHeader.exe_with_spaces):
cmd = ei.CommandSpec.from_environment()
assert len(cmd) == 1
assert cmd.as_header().startswith('#!"')
def test_from_simple_string_uses_shlex(self):
"""
In order to support `executable = /usr/bin/env my-python`, make sure
from_param invokes shlex on that input.
"""
cmd = ei.CommandSpec.from_param('/usr/bin/env my-python')
assert len(cmd) == 2
assert '"' not in cmd.as_header()
def test_sys_executable(self):
"""
CommandSpec.from_string(sys.executable) should contain just that param.
"""
writer = ei.ScriptWriter.best()
cmd = writer.command_spec_class.from_string(sys.executable)
assert len(cmd) == 1
assert cmd[0] == sys.executable
class TestWindowsScriptWriter:
def test_header(self):
hdr = ei.WindowsScriptWriter.get_script_header('')
assert hdr.startswith('#!')
assert hdr.endswith('\n')
hdr = hdr.lstrip('#!')
hdr = hdr.rstrip('\n')
# header should not start with an escaped quote
assert not hdr.startswith('\\"')
| apache-2.0 |
omerhasan/namebench | nb_third_party/dns/rdtypes/ANY/NSEC.py | 235 | 5296 | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
class NSEC(dns.rdata.Rdata):
"""NSEC record
@ivar next: the next name
@type next: dns.name.Name object
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['next', 'windows']
def __init__(self, rdclass, rdtype, next, windows):
super(NSEC, self).__init__(rdclass, rdtype)
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = self.next.choose_relativity(origin, relativize)
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = ord(bitmap[i])
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 + \
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%s%s' % (next, text)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
next = tok.get_name()
next = next.choose_relativity(origin, relativize)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = ['\0'] * 32
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, ''.join(bitmap[0:octets])))
bitmap = ['\0'] * 32
window = new_window
offset = nrdtype % 256
            byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = chr(ord(bitmap[byte]) | (0x80 >> bit))
windows.append((window, ''.join(bitmap[0:octets])))
return cls(rdclass, rdtype, next, windows)
from_text = classmethod(from_text)
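    # Worked example (illustrative): for rdtype A (1) the code above computes
    # window = 0, byte = 0, bit = 1, so bitmap[0] |= 0x80 >> 1 == 0x40; for
    # AAAA (28) it computes byte = 3, bit = 4, so bitmap[3] |= 0x08.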
def to_wire(self, file, compress = None, origin = None):
self.next.to_wire(file, None, origin)
for (window, bitmap) in self.windows:
file.write(chr(window))
file.write(chr(len(bitmap)))
file.write(bitmap)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(next, cused) = dns.name.from_wire(wire[: current + rdlen], current)
current += cused
rdlen -= cused
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("NSEC too short")
window = ord(wire[current])
octets = ord(wire[current + 1])
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad NSEC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad NSEC bitmap length")
bitmap = wire[current : current + octets]
current += octets
rdlen -= octets
windows.append((window, bitmap))
if not origin is None:
next = next.relativize(origin)
return cls(rdclass, rdtype, next, windows)
from_wire = classmethod(from_wire)
def choose_relativity(self, origin = None, relativize = True):
self.next = self.next.choose_relativity(origin, relativize)
def _cmp(self, other):
v = cmp(self.next, other.next)
if v == 0:
b1 = cStringIO.StringIO()
for (window, bitmap) in self.windows:
b1.write(chr(window))
b1.write(chr(len(bitmap)))
b1.write(bitmap)
b2 = cStringIO.StringIO()
for (window, bitmap) in other.windows:
b2.write(chr(window))
b2.write(chr(len(bitmap)))
b2.write(bitmap)
v = cmp(b1.getvalue(), b2.getvalue())
return v
| apache-2.0 |
luxus/home-assistant | homeassistant/components/sensor/torque.py | 5 | 3109 | """
Support for the Torque OBD application.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.torque/
"""
import re
from homeassistant.const import HTTP_OK
from homeassistant.helpers.entity import Entity
DOMAIN = 'torque'
DEPENDENCIES = ['http']
SENSOR_EMAIL_FIELD = 'eml'
DEFAULT_NAME = 'vehicle'
ENTITY_NAME_FORMAT = '{0} {1}'
API_PATH = '/api/torque'
SENSOR_NAME_KEY = r'userFullName(\w+)'
SENSOR_UNIT_KEY = r'userUnit(\w+)'
SENSOR_VALUE_KEY = r'k(\w+)'
NAME_KEY = re.compile(SENSOR_NAME_KEY)
UNIT_KEY = re.compile(SENSOR_UNIT_KEY)
VALUE_KEY = re.compile(SENSOR_VALUE_KEY)
def decode(value):
"""Double-decode required."""
return value.encode('raw_unicode_escape').decode('utf-8')
def convert_pid(value):
"""Convert pid from hex string to integer."""
return int(value, 16)
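# Illustrative request (field names inferred from the regexes above, not taken
# from Torque documentation): a GET such as
#   /api/torque?eml=user@example.com&userFullName0d=Speed&userUnit0d=km%2Fh&k0d=42
# yields pid 0x0d with name 'Speed', unit 'km/h' and the string value '42'.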
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup Torque platform."""
vehicle = config.get('name', DEFAULT_NAME)
email = config.get('email', None)
sensors = {}
def _receive_data(handler, path_match, data):
"""Received data from Torque."""
handler.send_response(HTTP_OK)
handler.end_headers()
if email is not None and email != data[SENSOR_EMAIL_FIELD]:
return
names = {}
units = {}
for key in data:
is_name = NAME_KEY.match(key)
is_unit = UNIT_KEY.match(key)
is_value = VALUE_KEY.match(key)
if is_name:
pid = convert_pid(is_name.group(1))
names[pid] = decode(data[key])
elif is_unit:
pid = convert_pid(is_unit.group(1))
units[pid] = decode(data[key])
elif is_value:
pid = convert_pid(is_value.group(1))
if pid in sensors:
sensors[pid].on_update(data[key])
for pid in names:
if pid not in sensors:
sensors[pid] = TorqueSensor(
ENTITY_NAME_FORMAT.format(vehicle, names[pid]),
units.get(pid, None))
add_devices([sensors[pid]])
hass.http.register_path('GET', API_PATH, _receive_data)
return True
class TorqueSensor(Entity):
"""Representation of a Torque sensor."""
def __init__(self, name, unit):
"""Initialize the sensor."""
self._name = name
self._unit = unit
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the default icon of the sensor."""
return 'mdi:car'
def on_update(self, value):
"""Receive an update."""
self._state = value
self.update_ha_state()
| mit |
Arcanemagus/SickRage | lib/hachoir_parser/video/mpeg_video.py | 86 | 22580 | """
Moving Picture Experts Group (MPEG) video version 1 and 2 parser.
Information:
- http://www.mpucoder.com/DVD/
- http://dvd.sourceforge.net/dvdinfo/
- http://www.mit.jyu.fi/mweber/leffakone/software/parsempegts/
- http://homepage.mac.com/rnc/EditMpegHeaderIFO.html
- http://standards.iso.org/ittf/PubliclyAvailableStandards/c025029_ISO_IEC_TR_11172-5_1998(E)_Software_Simulation.zip
This is a sample encoder/decoder implementation for MPEG-1.
Author: Victor Stinner
Creation date: 15 september 2006
"""
from hachoir_parser import Parser
from hachoir_parser.audio.mpeg_audio import MpegAudioFile
from hachoir_core.field import (FieldSet,
FieldError, ParserError,
Bit, Bits, Bytes, RawBits, PaddingBits, NullBits,
UInt8, UInt16,
RawBytes, PaddingBytes,
Enum)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.stream import StringInputStream
from hachoir_core.text_handler import textHandler, hexadecimal
class FragmentGroup:
def __init__(self, parser):
self.items = []
self.parser = parser
self.args = {}
def add(self, item):
self.items.append(item)
def createInputStream(self):
# FIXME: Use lazy stream creation
data = []
for item in self.items:
if 'rawdata' in item:
data.append( item["rawdata"].value )
data = "".join(data)
# FIXME: Use smarter code to send arguments
tags = {"class": self.parser, "args": self.args}
tags = tags.iteritems()
return StringInputStream(data, "<fragment group>", tags=tags)
class CustomFragment(FieldSet):
def __init__(self, parent, name, size, parser, description=None, group=None):
FieldSet.__init__(self, parent, name, description, size=size)
if not group:
group = FragmentGroup(parser)
self.group = group
self.group.add(self)
def createFields(self):
yield RawBytes(self, "rawdata", self.size//8)
def _createInputStream(self, **args):
return self.group.createInputStream()
class Timestamp(FieldSet):
static_size = 36
def createValue(self):
return (self["c"].value << 30) + (self["b"].value << 15) + self["a"].value
def createFields(self):
yield Bits(self, "c", 3)
yield Bit(self, "sync[]") # =True
yield Bits(self, "b", 15)
yield Bit(self, "sync[]") # =True
yield Bits(self, "a", 15)
yield Bit(self, "sync[]") # =True
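# Worked example (illustrative comment, not in the original parser): the 33-bit
# MPEG timestamp is stored as 3 + 15 + 15 data bits separated by marker bits,
# and createValue() reassembles it as (c << 30) + (b << 15) + a. For instance
# c=1, b=2, a=3 gives (1 << 30) + (2 << 15) + 3 = 1073807363.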
class SCR(FieldSet):
static_size = 35
def createFields(self):
yield Bits(self, "scr_a", 3)
yield Bit(self, "sync[]") # =True
yield Bits(self, "scr_b", 15)
yield Bit(self, "sync[]") # =True
yield Bits(self, "scr_c", 15)
class PackHeader(FieldSet):
def createFields(self):
if self.stream.readBits(self.absolute_address, 2, self.endian) == 1:
# MPEG version 2
yield Bits(self, "sync[]", 2)
yield SCR(self, "scr")
yield Bit(self, "sync[]")
yield Bits(self, "scr_ext", 9)
yield Bit(self, "sync[]")
yield Bits(self, "mux_rate", 22)
yield Bits(self, "sync[]", 2)
yield PaddingBits(self, "reserved", 5, pattern=1)
yield Bits(self, "stuffing_length", 3)
count = self["stuffing_length"].value
if count:
yield PaddingBytes(self, "stuffing", count, pattern="\xff")
else:
# MPEG version 1
yield Bits(self, "sync[]", 4)
yield Bits(self, "scr_a", 3)
yield Bit(self, "sync[]")
yield Bits(self, "scr_b", 15)
yield Bit(self, "sync[]")
yield Bits(self, "scr_c", 15)
yield Bits(self, "sync[]", 2)
yield Bits(self, "mux_rate", 22)
yield Bit(self, "sync[]")
def validate(self):
if self["mux_rate"].value == 0:
return "Invalid mux rate"
sync0 = self["sync[0]"]
if (sync0.size == 2 and sync0.value == 1):
# MPEG2
pass
if not self["sync[1]"].value \
or not self["sync[2]"].value \
or self["sync[3]"].value != 3:
return "Invalid synchronisation bits"
elif (sync0.size == 4 and sync0.value == 2):
# MPEG1
if not self["sync[1]"].value \
or not self["sync[2]"].value \
or self["sync[3]"].value != 3 \
or not self["sync[4]"].value:
return "Invalid synchronisation bits"
else:
return "Unknown version"
return True
class SystemHeader(FieldSet):
def createFields(self):
yield Bits(self, "marker[]", 1)
yield Bits(self, "rate_bound", 22)
yield Bits(self, "marker[]", 1)
yield Bits(self, "audio_bound", 6)
yield Bit(self, "fixed_bitrate")
yield Bit(self, "csps", description="Constrained system parameter stream")
yield Bit(self, "audio_lock")
yield Bit(self, "video_lock")
yield Bits(self, "marker[]", 1)
yield Bits(self, "video_bound", 5)
length = self['../length'].value-5
if length:
yield RawBytes(self, "raw[]", length)
class defaultParser(FieldSet):
def createFields(self):
yield RawBytes(self, "data", self["../length"].value)
class Padding(FieldSet):
def createFields(self):
yield PaddingBytes(self, "data", self["../length"].value)
class VideoExtension2(FieldSet):
def createFields(self):
yield Bit(self, "sync[]") # =True
yield Bits(self, "ext_length", 7)
yield NullBits(self, "reserved[]", 8)
size = self["ext_length"].value
if size:
yield RawBytes(self, "ext_bytes", size)
class VideoExtension1(FieldSet):
def createFields(self):
yield Bit(self, "has_private")
yield Bit(self, "has_pack_lgth")
yield Bit(self, "has_pack_seq")
yield Bit(self, "has_pstd_buffer")
yield Bits(self, "sync[]", 3) # =7
yield Bit(self, "has_extension2")
if self["has_private"].value:
yield RawBytes(self, "private", 16)
if self["has_pack_lgth"].value:
yield UInt8(self, "pack_lgth")
if self["has_pack_seq"].value:
yield Bit(self, "sync[]") # =True
yield Bits(self, "pack_seq_counter", 7)
yield Bit(self, "sync[]") # =True
yield Bit(self, "mpeg12_id")
yield Bits(self, "orig_stuffing_length", 6)
if self["has_pstd_buffer"].value:
yield Bits(self, "sync[]", 2) # =1
yield Enum(Bit(self, "pstd_buffer_scale"),
{True: "128 bytes", False: "1024 bytes"})
yield Bits(self, "pstd_size", 13)
class VideoSeqHeader(FieldSet):
ASPECT=["forbidden", "1.0000 (VGA etc.)", "0.6735",
"0.7031 (16:9, 625line)", "0.7615", "0.8055",
"0.8437 (16:9, 525line)", "0.8935",
"0.9157 (CCIR601, 625line)", "0.9815", "1.0255", "1.0695",
"1.0950 (CCIR601, 525line)", "1.1575", "1.2015", "reserved"]
FRAMERATE=["forbidden", "23.976 fps", "24 fps", "25 fps", "29.97 fps",
"30 fps", "50 fps", "59.94 fps", "60 fps"]
def createFields(self):
yield Bits(self, "width", 12)
yield Bits(self, "height", 12)
yield Enum(Bits(self, "aspect", 4), self.ASPECT)
yield Enum(Bits(self, "frame_rate", 4), self.FRAMERATE)
yield Bits(self, "bit_rate", 18, "Bit rate in units of 50 bytes")
yield Bits(self, "sync[]", 1) # =1
yield Bits(self, "vbv_size", 10, "Video buffer verifier size, in units of 16768")
yield Bit(self, "constrained_params_flag")
yield Bit(self, "has_intra_quantizer")
if self["has_intra_quantizer"].value:
for i in range(64):
yield Bits(self, "intra_quantizer[]", 8)
yield Bit(self, "has_non_intra_quantizer")
if self["has_non_intra_quantizer"].value:
for i in range(64):
yield Bits(self, "non_intra_quantizer[]", 8)
class GroupStart(FieldSet):
def createFields(self):
yield Bit(self, "drop_frame")
yield Bits(self, "time_hh", 5)
yield Bits(self, "time_mm", 6)
yield PaddingBits(self, "time_pad[]", 1)
yield Bits(self, "time_ss", 6)
yield Bits(self, "time_ff", 6)
yield Bit(self, "closed_group")
yield Bit(self, "broken_group")
yield PaddingBits(self, "pad[]", 5)
class PacketElement(FieldSet):
def createFields(self):
yield Bits(self, "sync[]", 2) # =2
if self["sync[0]"].value != 2:
raise ParserError("Unknown video elementary data")
yield Bits(self, "is_scrambled", 2)
yield Bits(self, "priority", 1)
yield Bit(self, "alignment")
yield Bit(self, "is_copyrighted")
yield Bit(self, "is_original")
yield Bit(self, "has_pts", "Presentation Time Stamp")
yield Bit(self, "has_dts", "Decode Time Stamp")
yield Bit(self, "has_escr", "Elementary Stream Clock Reference")
yield Bit(self, "has_es_rate", "Elementary Stream rate")
yield Bit(self, "dsm_trick_mode")
yield Bit(self, "has_copy_info")
yield Bit(self, "has_prev_crc", "If True, previous PES packet CRC follows")
yield Bit(self, "has_extension")
yield UInt8(self, "size")
# Time stamps
if self["has_pts"].value:
yield Bits(self, "sync[]", 4) # =2, or 3 if has_dts=True
yield Timestamp(self, "pts")
if self["has_dts"].value:
if not(self["has_pts"].value):
raise ParserError("Invalid PTS/DTS values")
yield Bits(self, "sync[]", 4) # =1
yield Timestamp(self, "dts")
if self["has_escr"].value:
yield Bits(self, "sync[]", 2) # =0
yield SCR(self, "escr")
if self["has_es_rate"].value:
yield Bit(self, "sync[]") # =True
yield Bits(self, "es_rate", 14) # in units of 50 bytes/second
yield Bit(self, "sync[]") # =True
if self["has_copy_info"].value:
yield Bit(self, "sync[]") # =True
yield Bits(self, "copy_info", 7)
if self["has_prev_crc"].value:
yield textHandler(UInt16(self, "prev_crc"), hexadecimal)
# --- Extension ---
if self["has_extension"].value:
yield VideoExtension1(self, "extension")
if self["extension/has_extension2"].value:
yield VideoExtension2(self, "extension2")
class VideoExtension(FieldSet):
EXT_TYPE = {1:'Sequence',2:'Sequence Display',8:'Picture Coding'}
def createFields(self):
yield Enum(Bits(self, "ext_type", 4), self.EXT_TYPE)
ext_type=self['ext_type'].value
if ext_type==1:
# Sequence extension
yield Bits(self, 'profile_and_level', 8)
yield Bit(self, 'progressive_sequence')
yield Bits(self, 'chroma_format', 2)
yield Bits(self, 'horiz_size_ext', 2)
yield Bits(self, 'vert_size_ext', 2)
yield Bits(self, 'bit_rate_ext', 12)
yield Bits(self, 'pad[]', 1)
yield Bits(self, 'vbv_buffer_size_ext', 8)
yield Bit(self, 'low_delay')
yield Bits(self, 'frame_rate_ext_n', 2)
yield Bits(self, 'frame_rate_ext_d', 5)
elif ext_type==2:
# Sequence Display extension
yield Bits(self, 'video_format', 3)
yield Bit(self, 'color_desc_present')
if self['color_desc_present'].value:
yield UInt8(self, 'color_primaries')
yield UInt8(self, 'transfer_characteristics')
yield UInt8(self, 'matrix_coeffs')
yield Bits(self, 'display_horiz_size', 14)
yield Bits(self, 'pad[]', 1)
yield Bits(self, 'display_vert_size', 14)
yield NullBits(self, 'pad[]', 3)
elif ext_type==8:
yield Bits(self, 'f_code[0][0]', 4, description="forward horizontal")
yield Bits(self, 'f_code[0][1]', 4, description="forward vertical")
yield Bits(self, 'f_code[1][0]', 4, description="backward horizontal")
yield Bits(self, 'f_code[1][1]', 4, description="backward vertical")
yield Bits(self, 'intra_dc_precision', 2)
yield Bits(self, 'picture_structure', 2)
yield Bit(self, 'top_field_first')
yield Bit(self, 'frame_pred_frame_dct')
yield Bit(self, 'concealment_motion_vectors')
yield Bit(self, 'q_scale_type')
yield Bit(self, 'intra_vlc_format')
yield Bit(self, 'alternate_scan')
yield Bit(self, 'repeat_first_field')
yield Bit(self, 'chroma_420_type')
yield Bit(self, 'progressive_frame')
yield Bit(self, 'composite_display')
if self['composite_display'].value:
yield Bit(self, 'v_axis')
yield Bits(self, 'field_sequence', 3)
yield Bit(self, 'sub_carrier')
yield Bits(self, 'burst_amplitude', 7)
yield Bits(self, 'sub_carrier_phase', 8)
yield NullBits(self, 'pad[]', 2)
else:
yield NullBits(self, 'pad[]', 6)
else:
yield RawBits(self, "raw[]", 4)
class VideoPicture(FieldSet):
CODING_TYPE = ["forbidden","intra-coded (I)",
"predictive-coded (P)",
"bidirectionally-predictive-coded (B)",
"dc intra-coded (D)", "reserved",
"reserved", "reserved"]
def createFields(self):
yield Bits(self, "temporal_ref", 10)
yield Enum(Bits(self, "coding_type", 3), self.CODING_TYPE)
yield Bits(self, "vbv_delay", 16)
if self['coding_type'].value in (2,3):
# predictive coding
yield Bit(self, 'full_pel_fwd_vector')
yield Bits(self, 'forward_f_code', 3)
if self['coding_type'].value == 3:
# bidi predictive coding
yield Bit(self, 'full_pel_back_vector')
yield Bits(self, 'backward_f_code', 3)
yield Bits(self, "padding", 8-(self.current_size % 8))
class VideoSlice(FieldSet):
def createFields(self):
yield Bits(self, "quantizer_scale", 5)
start=self.absolute_address+self.current_size+3
pos=self.stream.searchBytes('\0\0\1',start,start+1024*1024*8) # seek forward by at most 1MB
if pos is None: pos=self.root.size
yield RawBits(self, "data", pos-start+3)
class VideoChunk(FieldSet):
tag_info = {
0x00: ("pict_start[]", VideoPicture, "Picture start"),
0xB2: ("data_start[]", None, "Data start"),
0xB3: ("seq_hdr[]", VideoSeqHeader,"Sequence header"),
0xB4: ("seq_err[]", None, "Sequence error"),
0xB5: ("ext_start[]", VideoExtension,"Extension start"),
0xB7: ("seq_end[]", None, "Sequence end"),
0xB8: ("group_start[]", GroupStart, "Group start"),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parser, self._description = self.tag_info[tag]
if not self.parser:
self.parser = defaultParser
elif 0x01 <= tag <= 0xaf:
self._name, self.parser, self._description = ('slice[]', VideoSlice, 'Picture slice')
else:
self.parser = defaultParser
def createFields(self):
yield Bytes(self, "sync", 3)
yield textHandler(UInt8(self, "tag"), hexadecimal)
if self.parser and self['tag'].value != 0xb7:
yield self.parser(self, "content")
class VideoStream(Parser):
endian = BIG_ENDIAN
def createFields(self):
while self.current_size < self.size:
pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB
if pos is not None:
padsize = pos-self.current_size
if padsize:
yield PaddingBytes(self, "pad[]", padsize//8)
yield VideoChunk(self, "chunk[]")
class Stream(FieldSet):
def createFields(self):
padding=0
position=0
while True:
next=ord(self.parent.stream.readBytes(self.absolute_address+self.current_size+position, 1))
if next == 0xff:
padding+=1
position+=8
elif padding:
yield PaddingBytes(self, "pad[]", padding)
padding=None
position=0
elif 0x40 <= next <= 0x7f:
yield Bits(self, "scale_marker", 2) # 1
yield Bit(self, "scale")
scale=self['scale'].value
if scale:
scaleval=1024
else:
scaleval=128
yield textHandler(Bits(self, "size", 13), lambda field:str(field.value*scaleval))
elif 0x00 <= next <= 0x3f:
yield Bits(self, "ts_marker", 2) # 0
yield Bit(self, "has_pts")
yield Bit(self, "has_dts")
if self['has_pts'].value:
yield Timestamp(self, "pts")
if self['has_dts'].value:
yield PaddingBits(self, "pad[]", 4)
yield Timestamp(self, "dts")
if self.current_size % 8 == 4:
yield PaddingBits(self, "pad[]", 4)
break
elif 0x80 <= next <= 0xbf:
# MPEG-2 extension
yield PacketElement(self, "pkt")
break
else:
# 0xc0 - 0xfe: unknown
break
length = self["../length"].value - self.current_size//8
if length:
tag=self['../tag'].value
group=self.root.streamgroups[tag]
parname=self.parent._name
if parname.startswith('audio'):
frag = CustomFragment(self, "data", length*8, MpegAudioFile, group=group)
elif parname.startswith('video'):
frag = CustomFragment(self, "data", length*8, VideoStream, group=group)
else:
frag = CustomFragment(self, "data", length*8, None, group=group)
self.root.streamgroups[tag]=frag.group
yield frag
class Chunk(FieldSet):
ISO_END_CODE = 0xB9
tag_info = {
0xB9: ("end", None, "End"),
0xBA: ("pack_start[]", PackHeader, "Pack start"),
0xBB: ("system_start[]", SystemHeader, "System start"),
# streams
0xBD: ("private[]", Stream, "Private elementary"),
0xBE: ("padding[]", Stream, "Padding"),
# 0xC0 to 0xFE handled specially
0xFF: ("directory[]", Stream, "Program Stream Directory"),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
if not hasattr(self.root,'streamgroups'):
self.root.streamgroups={}
for tag in range(0xBC, 0x100):
self.root.streamgroups[tag]=None
tag = self["tag"].value
if tag in self.tag_info:
self._name, self.parser, self._description = self.tag_info[tag]
elif 0xBC <= tag <= 0xFF:
if 0xC0 <= tag < 0xE0:
# audio
streamid = tag-0xC0
self._name, self.parser, self._description = ("audio[%i][]"%streamid, Stream, "Audio Stream %i Packet"%streamid)
elif 0xE0 <= tag < 0xF0:
# video
streamid = tag-0xE0
self._name, self.parser, self._description = ("video[%i][]"%streamid, Stream, "Video Stream %i Packet"%streamid)
else:
self._name, self.parser, self._description = ("stream[]", Stream, "Data Stream Packet")
else:
self.parser = defaultParser
if not self.parser:
self.parser = defaultParser
elif self.parser != PackHeader and "length" in self:
self._size = (6 + self["length"].value) * 8
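    # Illustrative tag decoding (comment added for clarity, not in the original
    # parser): a start code 0x000001C3 carries tag 0xC3, which falls in the
    # 0xC0-0xDF range and therefore becomes "audio[3][]" (0xC3 - 0xC0 == 3),
    # while tag 0xE0 becomes "video[0][]".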
def createFields(self):
yield Bytes(self, "sync", 3)
yield textHandler(UInt8(self, "tag"), hexadecimal)
if self.parser:
if self.parser != PackHeader:
yield UInt16(self, "length")
if not self["length"].value:
return
yield self.parser(self, "content")
def createDescription(self):
return "Chunk: tag %s" % self["tag"].display
class MPEGVideoFile(Parser):
PARSER_TAGS = {
"id": "mpeg_video",
"category": "video",
"file_ext": ("mpeg", "mpg", "mpe", "vob"),
"mime": (u"video/mpeg", u"video/mp2p"),
"min_size": 12*8,
#TODO: "magic": xxx,
"description": "MPEG video, version 1 or 2"
}
endian = BIG_ENDIAN
version = None
def createFields(self):
while self.current_size < self.size:
pos=self.stream.searchBytes('\0\0\1',self.current_size,self.current_size+1024*1024*8) # seek forward by at most 1MB
if pos is not None:
padsize = pos-self.current_size
if padsize:
yield PaddingBytes(self, "pad[]", padsize//8)
chunk=Chunk(self, "chunk[]")
try:
# force chunk to be processed, so that CustomFragments are complete
chunk['content/data']
except: pass
yield chunk
def validate(self):
try:
pack = self[0]
except FieldError:
return "Unable to create first chunk"
if pack.name != "pack_start[0]":
return "Invalid first chunk"
if pack["sync"].value != "\0\0\1":
return "Invalid synchronisation"
return pack["content"].validate()
def getVersion(self):
if not self.version:
if self["pack_start[0]/content/sync[0]"].size == 2:
self.version = 2
else:
self.version = 1
return self.version
def createDescription(self):
if self.getVersion() == 2:
return "MPEG-2 video"
else:
return "MPEG-1 video"
| gpl-3.0 |
kevinmel2000/sl4a | python-build/python-libs/gdata/src/gdata/photos/service.py | 162 | 24363 | #!/usr/bin/env python
# -*-*- encoding: utf-8 -*-*-
#
# This is the service file for the Google Photo python client.
# It is used for higher level operations.
#
# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google PhotoService provides a human-friendly interface to
Google Photo (a.k.a Picasa Web) services[1].
It extends gdata.service.GDataService and as such hides all the
nasty details about authenticating, parsing and communicating with
Google Photos.
[1]: http://code.google.com/apis/picasaweb/gdata.html
Example:
import gdata.photos, gdata.photos.service
pws = gdata.photos.service.PhotosService()
pws.ClientLogin(username, password)
#Get all albums
albums = pws.GetUserFeed().entry
# Get all photos in second album
photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
# Get all tags for photos in second album and print them
tags = pws.GetFeed(albums[1].GetTagsUri()).entry
print [ tag.summary.text for tag in tags ]
# Get all comments for the first photos in list and print them
comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
print [ c.summary.text for c in comments ]
# Get a photo to work with
photo = photos[0]
# Update metadata
# Attributes from the <gphoto:*> namespace
photo.summary.text = u'A nice view from my veranda'
photo.title.text = u'Verandaview.jpg'
# Attributes from the <media:*> namespace
photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated
# Adding attributes to media object
# Rotate 90 degrees clockwise
photo.rotation = gdata.photos.Rotation(text='90')
# Submit modified photo object
photo = pws.UpdatePhotoMetadata(photo)
# Make sure you only modify the newly returned object, else you'll get
# versioning errors. See Optimistic-concurrency
# Add comment to a picture
comment = pws.InsertComment(photo, u'I wish the water always was this warm')
# Remove comment because it was silly
print "*blush*"
pws.Delete(comment.GetEditLink().href)
"""
__author__ = u'[email protected]'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 176 $'[11:-2]
import sys, os.path, StringIO
import time
import gdata.service
import gdata
import atom.service
import atom
import gdata.photos
SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
UNKOWN_ERROR=1000
GPHOTOS_BAD_REQUEST=400
GPHOTOS_CONFLICT=409
GPHOTOS_INTERNAL_SERVER_ERROR=500
GPHOTOS_INVALID_ARGUMENT=601
GPHOTOS_INVALID_CONTENT_TYPE=602
GPHOTOS_NOT_AN_IMAGE=603
GPHOTOS_INVALID_KIND=604
class GooglePhotosException(Exception):
def __init__(self, response):
self.error_code = response['status']
self.reason = response['reason'].strip()
if '<html>' in str(response['body']): #general html message, discard it
response['body'] = ""
self.body = response['body'].strip()
self.message = "(%(status)s) %(body)s -- %(reason)s" % response
#return explicit error codes
error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
'kind: That is not one of the acceptable values':
GPHOTOS_INVALID_KIND,
}
for msg, code in error_map.iteritems():
if self.body == msg:
self.error_code = code
break
self.args = [self.error_code, self.reason, self.body]
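# Illustrative example (comment added for clarity, not part of the original
# module): a failed request surfaces as a GooglePhotosException whose args
# follow the order set above, i.e. [error_code, reason, body]. A hypothetical
# response dict like
#   {'status': 409, 'reason': 'Conflict', 'body': 'Photo limit reached.'}
# would yield error_code == 409 and
# message == "(409) Photo limit reached. -- Conflict".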
class PhotosService(gdata.service.GDataService):
userUri = '/data/feed/api/user/%s'
def __init__(self, email=None, password=None, source=None,
server='picasaweb.google.com', additional_headers=None,
**kwargs):
"""Creates a client for the Google Photos service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'picasaweb.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
self.email = email
self.client = source
gdata.service.GDataService.__init__(
self, email=email, password=password, service='lh2', source=source,
server=server, additional_headers=additional_headers, **kwargs)
def GetFeed(self, uri, limit=None, start_index=None):
"""Get a feed.
The results are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
uri: the uri to fetch
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumFeed,
gdata.photos.UserFeed,
gdata.photos.PhotoFeed,
gdata.photos.CommentFeed,
gdata.photos.TagFeed,
depending on the results of the query.
Raises:
GooglePhotosException
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyFeedFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetEntry(self, uri, limit=None, start_index=None):
"""Get an Entry.
Arguments:
uri: the uri to the entry
limit (optional): the maximum number of entries to return. Defaults to what
the server returns.
Returns:
one of gdata.photos.AlbumEntry,
gdata.photos.UserEntry,
gdata.photos.PhotoEntry,
gdata.photos.CommentEntry,
gdata.photos.TagEntry,
depending on the results of the query.
Raises:
GooglePhotosException
"""
if limit is not None:
uri += '&max-results=%s' % limit
if start_index is not None:
uri += '&start-index=%s' % start_index
try:
return self.Get(uri, converter=gdata.photos.AnyEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetUserFeed(self, kind='album', user='default', limit=None):
"""Get user-based feed, containing albums, photos, comments or tags;
defaults to albums.
The entries are ordered by the values of their `updated' elements,
with the most recently updated entry appearing first in the feed.
Arguments:
kind: the kind of entries to get, either `album', `photo',
`comment' or `tag', or a python list of these. Defaults to `album'.
user (optional): whose albums we're querying. Defaults to current user.
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed, containing appropriate Entry elements
See:
http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html
"""
if isinstance(kind, (list, tuple) ):
kind = ",".join(kind)
uri = '/data/feed/api/user/%s?kind=%s' % (user, kind)
return self.GetFeed(uri, limit=limit)
def GetTaggedPhotos(self, tag, user='default', limit=None):
"""Get all photos belonging to a specific user, tagged by the given keyword
Arguments:
tag: The tag you're looking for, e.g. `dog'
user (optional): Whose images/videos you want to search, defaults
to current user
limit (optional): the maximum number of entries to return.
Defaults to everything the server returns.
Returns:
gdata.photos.UserFeed containing PhotoEntry elements
"""
# Lower-casing because of
# http://code.google.com/p/gdata-issues/issues/detail?id=194
uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower())
return self.GetFeed(uri, limit)
def SearchUserPhotos(self, query, user='default', limit=100):
"""Search through all photos for a specific user and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
user (optional): The username of whose photos you want to search, defaults
to current user.
limit (optional): Don't return more than `limit' hits, defaults to 100
Only public photos are searched, unless you are authenticated and
searching through your own photos.
Returns:
gdata.photos.UserFeed with PhotoEntry elements
"""
uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query)
return self.GetFeed(uri, limit=limit)
def SearchCommunityPhotos(self, query, limit=100):
"""Search through all public photos and return a feed.
This will look for matches in file names and image tags (a.k.a. keywords)
Arguments:
query: The string you're looking for, e.g. `vacation'
limit (optional): Don't return more than `limit' hits, defaults to 100
Returns:
gdata.GDataFeed with PhotoEntry elements
"""
uri='/data/feed/api/all?q=%s' % query
return self.GetFeed(uri, limit=limit)
def GetContacts(self, user='default', limit=None):
"""Retrieve a feed that contains a list of your contacts
Arguments:
user: Username of the user whose contacts you want
Returns
gdata.photos.UserFeed, with UserEntry entries
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=user' % user
return self.GetFeed(uri, limit=limit)
def SearchContactsPhotos(self, user='default', search=None, limit=None):
"""Search over your contacts' photos and return a feed
Arguments:
user: Username of the user whose contacts you want
search (optional): What to search for (photo title, description and keywords)
Returns
gdata.photos.UserFeed, with PhotoEntry elements
See:
http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
"""
uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search)
return self.GetFeed(uri, limit=limit)
def InsertAlbum(self, title, summary, location=None, access='public',
commenting_enabled='true', timestamp=None):
"""Add an album.
Needs authentication, see self.ClientLogin()
Arguments:
title: Album title
summary: Album summary / description
access (optional): `private' or `public'. Public albums are searchable
by everyone on the internet. Defaults to `public'
commenting_enabled (optional): `true' or `false'. Defaults to `true'.
timestamp (optional): A date and time for the album, in milliseconds since
Unix epoch[1] UTC. Defaults to now.
Returns:
The newly created gdata.photos.AlbumEntry
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
album = gdata.photos.AlbumEntry()
album.title = atom.Title(text=title, title_type='text')
album.summary = atom.Summary(text=summary, summary_type='text')
if location is not None:
album.location = gdata.photos.Location(text=location)
album.access = gdata.photos.Access(text=access)
if commenting_enabled in ('true', 'false'):
album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled)
if timestamp is None:
timestamp = '%i' % int(time.time() * 1000)
album.timestamp = gdata.photos.Timestamp(text=timestamp)
try:
return self.Post(album, uri=self.userUri % self.email,
converter=gdata.photos.AlbumEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertPhoto(self, album_or_uri, photo, filename_or_handle,
content_type='image/jpeg'):
"""Add a PhotoEntry
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
photo: PhotoEntry to add
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
"""
try:
assert(isinstance(photo, gdata.photos.PhotoEntry))
except AssertionError:
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`photo` must be a gdata.photos.PhotoEntry instance',
'reason':'Found %s, not PhotoEntry' % type(photo)
})
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, (str, unicode)) and \
os.path.exists(filename_or_handle): # it's a file name
mediasource = gdata.MediaSource()
mediasource.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'image'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
mediasource = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or object with a .read() method' % \
type(filename_or_handle)
})
if isinstance(album_or_uri, (str, unicode)): # it's a uri
feed_uri = album_or_uri
elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object
feed_uri = album_or_uri.GetFeedLink().href
try:
return self.Post(photo, uri=feed_uri, media_source=mediasource,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle,
content_type='image/jpeg', keywords=None):
"""Add a photo without constructing a PhotoEntry.
Needs authentication, see self.ClientLogin()
Arguments:
album_or_uri: AlbumFeed or uri of the album where the photo should go
title: Photo title
summary: Photo summary / description
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
keywords (optional): a 1) comma separated string or 2) a python list() of
keywords (a.k.a. tags) to add to the image.
E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation']
Returns:
The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors
See:
http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
[1]: http://en.wikipedia.org/wiki/Unix_epoch
"""
metadata = gdata.photos.PhotoEntry()
metadata.title=atom.Title(text=title)
metadata.summary = atom.Summary(text=summary, summary_type='text')
if keywords is not None:
if isinstance(keywords, list):
keywords = ','.join(keywords)
metadata.media.keywords = gdata.media.Keywords(text=keywords)
return self.InsertPhoto(album_or_uri, metadata, filename_or_handle,
content_type)
def UpdatePhotoMetadata(self, photo):
"""Update a photo's metadata.
Needs authentication, see self.ClientLogin()
You can update any or all of the following metadata properties:
* <title>
* <media:description>
* <gphoto:checksum>
* <gphoto:client>
* <gphoto:rotation>
* <gphoto:timestamp>
* <gphoto:commentingEnabled>
Arguments:
photo: a gdata.photos.PhotoEntry object with updated elements
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(uri).entry[0]
p.title.text = u'My new text'
p.commentingEnabled.text = 'false'
p = UpdatePhotoMetadata(p)
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
return self.Put(data=photo, uri=photo.GetEditLink().href,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle,
content_type = 'image/jpeg'):
"""Update a photo's binary data.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a
`edit-media' uri pointing to it
filename_or_handle: A file-like object or file name where the image/video
will be read from
content_type (optional): Internet media type (a.k.a. mime type) of
media object. Currently Google Photos supports these types:
o image/bmp
o image/gif
o image/jpeg
o image/png
Images will be converted to jpeg on upload. Defaults to `image/jpeg'
Returns:
The modified gdata.photos.PhotoEntry
Example:
p = GetFeed(PhotoUri)
p = UpdatePhotoBlob(p, '/tmp/newPic.jpg')
It is important that you don't keep the old object around, once
it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
"""
try:
majtype, mintype = content_type.split('/')
assert(mintype in SUPPORTED_UPLOAD_TYPES)
except (ValueError, AssertionError):
raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
'body':'This is not a valid content type: %s' % content_type,
'reason':'Accepted content types: %s' % \
['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
})
if isinstance(filename_or_handle, (str, unicode)) and \
os.path.exists(filename_or_handle): # it's a file name
photoblob = gdata.MediaSource()
photoblob.setFile(filename_or_handle, content_type)
elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
if hasattr(filename_or_handle, 'seek'):
filename_or_handle.seek(0) # rewind pointer to the start of the file
# gdata.MediaSource needs the content length, so read the whole image
file_handle = StringIO.StringIO(filename_or_handle.read())
name = 'image'
if hasattr(filename_or_handle, 'name'):
name = filename_or_handle.name
      photoblob = gdata.MediaSource(file_handle, content_type,
content_length=file_handle.len, file_name=name)
else: #filename_or_handle is not valid
raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
'body':'`filename_or_handle` must be a path name or a file-like object',
'reason':'Found %s, not path name or an object with .read() method' % \
type(filename_or_handle)
})
if isinstance(photo_or_uri, (str, unicode)):
entry_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
entry_uri = photo_or_uri.GetEditMediaLink().href
try:
return self.Put(photoblob, entry_uri,
converter=gdata.photos.PhotoEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertTag(self, photo_or_uri, tag):
"""Add a tag (a.k.a. keyword) to a photo.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a
`post' uri pointing to it
(string) tag: The tag/keyword
Returns:
The new gdata.photos.TagEntry
Example:
p = GetFeed(PhotoUri)
tag = InsertTag(p, 'Beautiful sunsets')
"""
tag = gdata.photos.TagEntry(title=atom.Title(text=tag))
if isinstance(photo_or_uri, (str, unicode)):
post_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
post_uri = photo_or_uri.GetPostLink().href
try:
return self.Post(data=tag, uri=post_uri,
converter=gdata.photos.TagEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def InsertComment(self, photo_or_uri, comment):
"""Add a comment to a photo.
Needs authentication, see self.ClientLogin()
Arguments:
photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented
, or a `post' uri pointing to it
(string) comment: The actual comment
Returns:
The new gdata.photos.CommentEntry
Example:
p = GetFeed(PhotoUri)
tag = InsertComment(p, 'OOOH! I would have loved to be there.
Who's that in the back?')
"""
comment = gdata.photos.CommentEntry(content=atom.Content(text=comment))
if isinstance(photo_or_uri, (str, unicode)):
post_uri = photo_or_uri # it's a uri
elif hasattr(photo_or_uri, 'GetEditMediaLink'):
post_uri = photo_or_uri.GetPostLink().href
try:
return self.Post(data=comment, uri=post_uri,
converter=gdata.photos.CommentEntryFromString)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def Delete(self, object_or_uri, *args, **kwargs):
"""Delete an object.
Re-implementing the GDataService.Delete method, to add some
convenience.
Arguments:
object_or_uri: Any object that has a GetEditLink() method that
returns a link, or a uri to that object.
Returns:
? or GooglePhotosException on errors
"""
try:
uri = object_or_uri.GetEditLink().href
except AttributeError:
uri = object_or_uri
try:
return gdata.service.GDataService.Delete(self, uri, *args, **kwargs)
except gdata.service.RequestError, e:
raise GooglePhotosException(e.args[0])
def GetSmallestThumbnail(media_thumbnail_list):
"""Helper function to get the smallest thumbnail of a list of
gdata.media.Thumbnail.
Returns gdata.media.Thumbnail """
r = {}
for thumb in media_thumbnail_list:
r[int(thumb.width)*int(thumb.height)] = thumb
keys = r.keys()
keys.sort()
return r[keys[0]]
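# Illustrative behaviour (comment added for clarity, not part of the original
# module): given thumbnails of 72x72, 144x144 and 288x288 pixels, the 72x72
# entry has the smallest width*height product and is the one returned.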
def ConvertAtomTimestampToEpoch(timestamp):
"""Helper function to convert a timestamp string, for instance
  from atom:updated or atom:published, to seconds since Unix epoch
(a.k.a. POSIX time).
`2007-07-22T00:45:10.000Z' -> """
return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z'))
## TODO: Timezone aware
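# Illustrative result (comment added for clarity; assumes the process runs in a
# UTC local timezone, since time.mktime() interprets the parsed tuple as local time):
#   >>> ConvertAtomTimestampToEpoch('2007-07-22T00:45:10.000Z')
#   1185065110.0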
| apache-2.0 |
gg7/sentry | tests/sentry/api/endpoints/test_project_member_index.py | 23 | 1234 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase
class ProjectMemberIndexTest(APITestCase):
def test_simple(self):
user_1 = self.create_user('foo@localhost', username='foo')
user_2 = self.create_user('bar@localhost', username='bar')
user_3 = self.create_user('baz@localhost', username='baz')
org = self.create_organization(owner=user_1)
team = self.create_team(organization=org, slug='baz')
project_1 = self.create_project(team=team, slug='foo')
project_2 = self.create_project(team=team, slug='bar')
org.member_set.create(user=user_2, has_global_access=True)
org.member_set.create(user=user_3, has_global_access=False)
self.login_as(user=user_1)
url = reverse('sentry-api-0-project-member-index', kwargs={
'organization_slug': project_1.organization.slug,
'project_slug': project_1.slug,
})
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 2
assert response.data[0]['email'] == user_2.email
assert response.data[1]['email'] == user_1.email
| bsd-3-clause |
voriux/Flexget | flexget/plugins/output/exec.py | 3 | 7367 | from __future__ import unicode_literals, division, absolute_import
from collections import Mapping
import logging
import subprocess
from flexget import plugin
from flexget.event import event
from flexget.utils.template import render_from_entry, render_from_task, RenderError
from flexget.utils.tools import io_encoding
log = logging.getLogger('exec')
class EscapingDict(Mapping):
"""Helper class, same as a dict, but returns all string value with quotes escaped."""
def __init__(self, mapping):
self._data = mapping
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
value = self._data[key]
if isinstance(value, basestring):
# TODO: May need to be different depending on OS
value = value.replace('"', '\\"')
#value = re.escape(value)
return value
class PluginExec(object):
"""
Execute commands
    Simple example, execute a command for entries that reach output::
exec: echo 'found {{title}} at {{url}}' > file
Advanced Example::
exec:
on_start:
phase: echo "Started"
on_input:
for_entries: echo 'got {{title}}'
on_output:
          for_accepted: echo 'accepted {{title}} - {{url}}' > file
You can use all (available) entry fields in the command.
"""
NAME = 'exec'
HANDLED_PHASES = ['start', 'input', 'filter', 'output', 'exit']
schema = {
'oneOf': [
{'type': 'string'},
{
'type': 'object',
'properties': {
'on_start': {'$ref': '#/definitions/phaseSettings'},
'on_input': {'$ref': '#/definitions/phaseSettings'},
'on_filter': {'$ref': '#/definitions/phaseSettings'},
'on_output': {'$ref': '#/definitions/phaseSettings'},
'on_exit': {'$ref': '#/definitions/phaseSettings'},
'fail_entries': {'type': 'boolean'},
'auto_escape': {'type': 'boolean'},
'encoding': {'type': 'string'},
'allow_background': {'type': 'boolean'}
},
'additionalProperties': False
}
],
'definitions': {
'phaseSettings': {
'type': 'object',
'properties': {
'phase': {'type': 'string'},
'for_entries': {'type': 'string'},
'for_accepted': {'type': 'string'},
'for_rejected': {'type': 'string'},
'for_failed': {'type': 'string'}
},
'additionalProperties': False
}
}
}
def prepare_config(self, config):
if isinstance(config, basestring):
config = {'on_output': {'for_accepted': config}}
if not config.get('encoding'):
config['encoding'] = io_encoding
return config
def execute_cmd(self, cmd, allow_background, encoding):
log.verbose('Executing: %s' % cmd)
p = subprocess.Popen(cmd.encode(encoding), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=False)
if not allow_background:
(r, w) = (p.stdout, p.stdin)
response = r.read().decode(encoding, 'replace')
r.close()
w.close()
if response:
log.info('Stdout: %s' % response)
return p.wait()
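    # Illustrative rendering (comment added for clarity, not part of the original
    # plugin; entry values and file name are hypothetical): with a configuration
    # such as
    #
    #   exec: echo 'found {{title}} at {{url}}' > found.txt
    #
    # an accepted entry titled 'Some.Show.S01E01' with url
    # 'http://example.com/ep1' is rendered by execute() below into
    #   echo 'found Some.Show.S01E01 at http://example.com/ep1' > found.txt
    # which execute_cmd() then runs via subprocess.Popen(shell=True).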
def execute(self, task, phase_name, config):
config = self.prepare_config(config)
if not phase_name in config:
log.debug('phase %s not configured' % phase_name)
return
name_map = {'for_entries': task.entries, 'for_accepted': task.accepted,
'for_rejected': task.rejected, 'for_failed': task.failed}
allow_background = config.get('allow_background')
for operation, entries in name_map.iteritems():
if not operation in config[phase_name]:
continue
log.debug('running phase_name: %s operation: %s entries: %s' % (phase_name, operation, len(entries)))
for entry in entries:
cmd = config[phase_name][operation]
entrydict = EscapingDict(entry) if config.get('auto_escape') else entry
# Do string replacement from entry, but make sure quotes get escaped
try:
cmd = render_from_entry(cmd, entrydict)
except RenderError as e:
log.error('Could not set exec command for %s: %s' % (entry['title'], e))
# fail the entry if configured to do so
if config.get('fail_entries'):
entry.fail('Entry `%s` does not have required fields for string replacement.' % entry['title'])
continue
log.debug('phase_name: %s operation: %s cmd: %s' % (phase_name, operation, cmd))
if task.options.test:
log.info('Would execute: %s' % cmd)
else:
# Make sure the command can be encoded into appropriate encoding, don't actually encode yet,
# so logging continues to work.
try:
cmd.encode(config['encoding'])
except UnicodeEncodeError:
log.error('Unable to encode cmd `%s` to %s' % (cmd, config['encoding']))
if config.get('fail_entries'):
entry.fail('cmd `%s` could not be encoded to %s.' % (cmd, config['encoding']))
continue
# Run the command, fail entries with non-zero return code if configured to
if self.execute_cmd(cmd, allow_background, config['encoding']) != 0 and config.get('fail_entries'):
entry.fail('exec return code was non-zero')
# phase keyword in this
if 'phase' in config[phase_name]:
cmd = config[phase_name]['phase']
try:
cmd = render_from_task(cmd, task)
except RenderError as e:
log.error('Error rendering `%s`: %s' % (cmd, e))
else:
log.debug('phase cmd: %s' % cmd)
if task.options.test:
log.info('Would execute: %s' % cmd)
else:
self.execute_cmd(cmd, allow_background, config['encoding'])
def __getattr__(self, item):
"""Creates methods to handle task phases."""
for phase in self.HANDLED_PHASES:
if item == plugin.phase_methods[phase]:
# A phase method we handle has been requested
break
else:
# We don't handle this phase
raise AttributeError(item)
def phase_handler(task, config):
self.execute(task, 'on_' + phase, config)
# Make sure we run after other plugins so exec can use their output
phase_handler.priority = 100
return phase_handler
@event('plugin.register')
def register_plugin():
plugin.register(PluginExec, 'exec', api_ver=2)
| mit |
staute/shinken-mod-livestatus | module/livestatus_query_metainfo.py | 3 | 25571 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import re
import time
from counter import Counter
from livestatus_stack import LiveStatusStack
from shinken.log import logger
def has_not_more_than(list1, list2):
return len(set(list1).difference(set(list2))) == 0
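# Illustrative behaviour (comment added for clarity, not part of the original
# module): the helper answers "is list1 a subset of list2?", e.g.
#   has_not_more_than(['state'], ['state', 'state_type'])    -> True
#   has_not_more_than(['state', 'plugin_output'], ['state']) -> False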
"""
There are several categories for queries. Their main difference is the kind
of event which invalidates the corresponding cache.
- CACHE_IMPOSSIBLE
- CACHE_PROGRAM_STATIC
applies to queries which ask for version numbers etc.
- CACHE_GLOBAL_STATS
applies to stats queries which ask for state only and don't refer to a specific host/service.
invalidated by a state change (check_result_brok)
- CACHE_GLOBAL_STATS_WITH_STATETYPE
the same, but takes hard/soft states into account.
invalidated by a state change
invalidated by a state_type change
- CACHE_GLOBAL_STATS_WITH_STATUS
applies to queries which ask for status (in_downtime/active/passive/...)
invalidated by changes in status update broks
- CACHE_HOST_STATS
- CACHE_SERVICE_STATS
- CACHE_IRREVERSIBLE_HISTORY
applies to queries which want to read log data from a time in the past.
If the columns are not of any type which can change later, this query
can be cached forever.
"""
CACHE_IMPOSSIBLE = 0
CACHE_PROGRAM_STATIC = 1
CACHE_GLOBAL_STATS = 2
CACHE_GLOBAL_STATS_WITH_STATETYPE = 3
CACHE_HOST_STATS = 4
CACHE_SERVICE_STATS = 5
CACHE_IRREVERSIBLE_HISTORY = 6
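# Illustrative mapping (comment added for clarity, not part of the original
# module), based on categorize() further down: a query such as
#   GET status
#   Columns: livestatus_version program_version program_start
# falls into CACHE_PROGRAM_STATIC, while a historic log query over a closed
# time interval with immutable columns falls into CACHE_IRREVERSIBLE_HISTORY.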
"""
Sometimes it is possible to see from the list of filters that this query's purpose
is to find one specific host or service (or the services of one specific host).
The query is therefore tagged with a hint type, helping an upper layer to limit
the number of objects to process.
"""
HINT_NONE = 0
HINT_HOST = 1
HINT_HOSTS = 2
HINT_SERVICES_BY_HOST = 3
HINT_SERVICE = 4
HINT_SERVICES_BY_HOSTS = 5
HINT_SERVICES = 6
HINT_HOSTS_BY_GROUP = 7
HINT_SERVICES_BY_HOSTGROUP = 8
HINT_SERVICES_BY_GROUP = 9
class LiveStatusQueryMetainfoFilterStack(LiveStatusStack):
"""
This is a filterstack which produces a text representation of
a and/or-filter-tree, similar to sql.
It can be used some time for text analysis.
"""
def __init__(self, *args, **kw):
self.type = 'text'
self.__class__.__bases__[0].__init__(self, *args, **kw)
def not_elements(self):
top_filter = self.get_stack()
negate_filter = '(NOT ' + top_filter + ')'
self.put_stack(negate_filter)
def and_elements(self, num):
"""Take num filters from the stack, and them and put the result back"""
if num > 1:
filters = []
for _ in range(num):
filters.append(self.get_stack())
and_filter = '(' + (' AND ').join(filters) + ')'
self.put_stack(and_filter)
def or_elements(self, num):
"""Take num filters from the stack, or them and put the result back"""
if num > 1:
filters = []
for _ in range(num):
filters.append(self.get_stack())
or_filter = '(' + (' OR ').join(filters) + ')'
self.put_stack(or_filter)
def get_stack(self):
"""Return the top element from the stack or a filter which is always true"""
if self.qsize() == 0:
return ''
else:
return self.get()
class LiveStatusQueryMetainfo(object):
"""
This class implements a more "machine-readable" form of a livestatus query.
The lines of a query text are split up in a list of tuples,
where the first element is the lql statement and the remaining elements
are columns, attributes, operators etc.
    Its main purpose is to provide methods which are used to rank the query
in specific categories.
"""
def __init__(self, data):
self.data = data
self.cache_category = CACHE_IMPOSSIBLE
self.query_hints = {
'target': HINT_NONE,
}
self.table = 'TABLE_IS_NOT_SET'
self.keyword_counter = Counter()
self.metainfo_filter_stack = LiveStatusQueryMetainfoFilterStack()
self.structure(data)
self.key = hash(str(self.structured_data))
self.is_stats = self.keyword_counter['Stats'] > 0
self.client_localtime = int(time.time())
self.stats_columns = [f[1] for f in self.structured_data if f[0] == 'Stats']
self.filter_columns = [f[1] for f in self.structured_data if f[0] == 'Filter']
self.columns = [x for f in self.structured_data if f[0] == 'Columns' for x in f[1]]
self.categorize()
def __str__(self):
text = "table %s\n" % self.table
text += "columns %s\n" % self.columns
text += "stats_columns %s\n" % self.stats_columns
text += "filter_columns %s\n" % list(set(self.filter_columns))
text += "is_stats %s\n" % self.is_stats
text += "is_cacheable %s\n" % str(self.cache_category != CACHE_IMPOSSIBLE)
return text
def add_filter(self, operator, attribute, reference):
self.metainfo_filter_stack.put_stack(self.make_text_filter(operator, attribute, reference))
def add_filter_and(self, andnum):
self.metainfo_filter_stack.and_elements(andnum)
def add_filter_or(self, ornum):
self.metainfo_filter_stack.or_elements(ornum)
def add_filter_not(self):
self.metainfo_filter_stack.not_elements()
def make_text_filter(self, operator, attribute, reference):
return '%s%s%s' % (attribute, operator, reference)
def structure(self, data):
"""
Reformat the lines of a query so that they are a list of tuples
where the first element is the keyword
"""
self.structured_data = []
for line in data.splitlines():
line = line.strip()
            # Tools like NagVis send KEYWORD:option, and we prefer to have
            # a space following the ':'
if ':' in line and not ' ' in line:
line = line.replace(':', ': ')
keyword = line.split(' ')[0].rstrip(':')
if keyword == 'GET':
self.table = self.split_command(line)[1]
self.structured_data.append((keyword, self.split_command(line)[1]))
elif keyword == 'Columns': # Get the names of the desired columns
_, columns = self.split_option_with_columns(line)
self.structured_data.append((keyword, columns))
elif keyword == 'ResponseHeader':
_, responseheader = self.split_option(line)
self.structured_data.append((keyword, responseheader))
elif keyword == 'OutputFormat':
_, outputformat = self.split_option(line)
self.structured_data.append((keyword, outputformat))
elif keyword == 'KeepAlive':
_, keepalive = self.split_option(line)
self.structured_data.append((keyword, keepalive))
elif keyword == 'ColumnHeaders':
_, columnheaders = self.split_option(line)
self.structured_data.append((keyword, columnheaders))
elif keyword == 'Limit':
_, limit = self.split_option(line)
self.structured_data.append((keyword, limit))
elif keyword == 'AuthUser':
_, authuser = self.split_option(line)
self.structured_data.append((keyword, authuser))
self.query_hints['authuser'] = authuser
elif keyword == 'Filter':
try:
_, attribute, operator, reference = re.split(r"[\s]+", line, 3)
except Exception:
_, attribute, operator = re.split(r"[\s]+", line, 2)
reference = ''
self.metainfo_filter_stack.put_stack(self.make_text_filter(operator, attribute, reference))
if reference != '_REALNAME':
attribute = self.strip_table_from_column(attribute)
self.structured_data.append((keyword, attribute, operator, reference))
elif keyword == 'And':
_, andnum = self.split_option(line)
self.structured_data.append((keyword, andnum))
self.metainfo_filter_stack.and_elements(andnum)
elif keyword == 'Or':
_, ornum = self.split_option(line)
self.structured_data.append((keyword, ornum))
self.metainfo_filter_stack.or_elements(ornum)
elif keyword == 'Negate':
self.structured_data.append((keyword,))
self.metainfo_filter_stack.not_elements()
elif keyword == 'StatsGroupBy':
_, columns = self.split_option_with_columns(line)
self.structured_data.append((keyword, columns))
elif keyword == 'Stats':
try:
_, attribute, operator, reference = self.split_option(line, 3)
if attribute in ['sum', 'min', 'max', 'avg', 'std'] and reference.startswith('as '):
attribute, operator = operator, attribute
elif attribute in ['sum', 'min', 'max', 'avg', 'std'] and reference == '=':
attribute, operator = operator, attribute
reference = ''
except Exception:
_, attribute, operator = self.split_option(line, 3)
if attribute in ['sum', 'min', 'max', 'avg', 'std']:
attribute, operator = operator, attribute
reference = ''
self.structured_data.append((keyword, attribute, operator, reference))
elif keyword == 'StatsAnd':
_, andnum = self.split_option(line)
self.structured_data.append((keyword, andnum))
elif keyword == 'StatsOr':
_, ornum = self.split_option(line)
self.structured_data.append((keyword, ornum))
elif keyword == 'Separators':
_, sep1, sep2, sep3, sep4 = line.split(' ', 5)
self.structured_data.append((keyword, sep1, sep2, sep3, sep4))
elif keyword == 'Localtime':
_, self.client_localtime = self.split_option(line)
# NO # self.structured_data.append((keyword, client_localtime))
else:
logger.warning("[Livestatus Query Metainfo] Received a line of input which i can't handle: '%s'" % line)
self.structured_data.append((keyword, 'Received a line of input which i can\'t handle: %s' % line))
self.keyword_counter[keyword] += 1
self.metainfo_filter_stack.and_elements(self.metainfo_filter_stack.qsize())
self.flat_filter = self.metainfo_filter_stack.get_stack()
def split_command(self, line, splits=1):
"""Create a list from the words of a line"""
return line.split(' ', splits)
def split_option(self, line, splits=1):
"""Like split_commands, but converts numbers to int data type"""
x = map(lambda i: (i.isdigit() and int(i)) or i, [token.strip() for token in re.split(r"[\s]+", line, splits)])
return x
def split_option_with_columns(self, line):
"""Split a line in a command and a list of words"""
cmd, columns = self.split_option(line)
return cmd, [c for c in re.compile(r'\s+').split(columns)]
def strip_table_from_column(self, column):
"""Cut off the table name, because it is possible
to say service_state instead of state"""
bygroupmatch = re.compile('(\w+)by.*group').search(self.table)
if bygroupmatch:
return re.sub(re.sub('s$', '', bygroupmatch.group(1)) + '_', '', column, 1)
else:
return re.sub(re.sub('s$', '', self.table) + '_', '', column, 1)
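    # Worked example (comment added for clarity, not in the original module):
    # for self.table == 'services' the singular prefix 'service_' is stripped
    # once, so strip_table_from_column('service_state') returns 'state', while
    # a column without the prefix, e.g. 'host_name', is returned unchanged.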
def is_a_closed_chapter(self):
"""
When the query is asking for log events from a time interval in the
past, we can assume that the response will be a good candidate for
caching. A precondition is, that only attributes are involved, which
can not change over time. (ex. current_host_num_critical_services)
"""
logline_elements = ['attempt', 'class', 'command_name', 'comment', 'contact_name', 'host_name', 'message', 'options', 'plugin_output', 'service_description', 'state', 'state_type', 'time', 'type']
logline_elements.extend(['current_host_groups', 'current_service_groups'])
if self.table == 'log':
limits = sorted([(f[2], int(f[3])) for f in self.structured_data if f[0] == 'Filter' and f[1] == 'time'], key=lambda x: x[1])
if len(limits) == 2 and limits[1][1] <= int(time.time()) and limits[0][0].startswith('>') and limits[1][0].startswith('<'):
if has_not_more_than(self.columns, logline_elements):
return True
return False
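    # Illustrative query (comment added for clarity, not part of the original
    # module) that is_a_closed_chapter() accepts for long-term caching: a
    # 'GET log' request with exactly one 'Filter: time >= <start>' and one
    # 'Filter: time <= <end>' where <end> already lies in the past, and whose
    # Columns are limited to immutable log attributes such as time, type,
    # state and plugin_output.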
def categorize(self):
"""
Analyze the formalized query (which table, which columns, which
filters, stats or not,...) and find a suitable cache_category.
"""
# self.table, self.structured_data
if self.table == 'status' and has_not_more_than(self.columns, ['livestatus_version', 'program_version', 'program_start']):
self.cache_category = CACHE_PROGRAM_STATIC
elif not self.keyword_counter['Filter'] and self.table == 'host' and has_not_more_than(self.columns, ['name', 'custom_variable_names', 'custom_variable_values', 'services']):
self.cache_category = CACHE_GLOBAL_STATS
elif self.table == 'log' and self.is_stats and has_not_more_than(self.stats_columns, ['state']):
# and only 1 timefilter which is >=
self.cache_category = CACHE_GLOBAL_STATS
elif self.table == 'services' and self.is_stats and has_not_more_than(self.stats_columns, ['state']):
# and only 1 timefilter which is >=
self.cache_category = CACHE_GLOBAL_STATS
elif self.is_a_closed_chapter():
self.cache_category = CACHE_IRREVERSIBLE_HISTORY
elif self.table == 'services' and not self.is_stats and has_not_more_than(self.columns, ['host_name', 'description', 'state', 'state_type']):
self.cache_category = CACHE_SERVICE_STATS
else:
pass
logger.debug("[Livestatus Query Metainfo] I cannot cache this %s" % str(self))
# Initial implementation only respects the = operator (~ may be an option in the future)
all_filters = sorted([str(f[1]) for f in self.structured_data if (f[0] == 'Filter')])
eq_filters = sorted([str(f[1]) for f in self.structured_data if (f[0] == 'Filter' and f[2] == '=')])
unique_eq_filters = sorted({}.fromkeys(eq_filters).keys())
ge_contains_filters = sorted([str(f[1]) for f in self.structured_data if (f[0] == 'Filter' and f[2] == '>=')])
unique_ge_contains_filters = sorted({}.fromkeys(ge_contains_filters).keys())
logger.debug("[Livestatus Query Metainfo] ge_contains_filters: %s" % str(ge_contains_filters))
logger.debug("[Livestatus Query Metainfo] unique_ge_contains_filters: %s" % str(unique_ge_contains_filters))
if [f for f in self.structured_data if f[0] == 'Negate']:
# HANDS OFF!!!!
# This might be something like:
# NOT (description=test_ok_00 AND host_name=test_host_005)
# Using hints to preselect the hosts/services will result in
# absolutely wrong results.
pass
elif self.table == 'hosts' or self.table == 'hostsbygroup':
# Do we have exactly 1 Filter, which is 'name'?
if eq_filters == ['name']:
if len(eq_filters) == len([f for f in self.structured_data if (f[0] == 'Filter' and f[1] == 'name')]):
self.query_hints['target'] = HINT_HOST
self.query_hints['host_name'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[2] == '=')][0]
# this helps: thruk_host_detail, thruk_host_status_detail, thruk_service_detail, nagvis_host_icon
elif unique_eq_filters == ['name']:
# we want a lot of hosts selected by
# Filter: name
# Filter: name
# ...
# Or: n
hosts = []
only_hosts = True
try:
num_hosts = 0
for i, _ in enumerate(self.structured_data):
if self.structured_data[i][0] == 'Filter' and self.structured_data[i][1] == 'name':
if self.structured_data[i+1][0] == 'Filter' and self.structured_data[i+1][1] == 'name':
num_hosts += 1
hosts.append(self.structured_data[i][3])
elif self.structured_data[i+1][0] == 'Or' and self.structured_data[i+1][1] == num_hosts + 1:
num_hosts += 1
hosts.append(self.structured_data[i][3])
only_hosts = True
else:
only_hosts = False
except Exception, exp:
only_hosts = False
if only_hosts:
if len(hosts) == len(filter(lambda x: x[0] == 'Filter' and x[1] == 'name', self.structured_data)):
hosts = list(set(hosts))
hosts.sort()
self.query_hints['target'] = HINT_HOSTS
self.query_hints['host_name'] = hosts
# this helps: nagvis host icons
elif ge_contains_filters == ['groups']:
# we want all the hosts in a hostgroup
if len(ge_contains_filters) == len([f for f in self.structured_data if (f[0] == 'Filter' and (f[1] == 'groups' or f[1] == 'name'))]):
self.query_hints['target'] = HINT_HOSTS_BY_GROUP
self.query_hints['hostgroup_name'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[2] == '>=')][0]
# this helps: nagvis hostgroup
elif self.table == 'services' or self.table == 'servicesbygroup' or self.table == 'servicesbyhostgroup':
if eq_filters == ['host_name']:
# Do we have exactly 1 Filter, which is 'host_name'?
# In this case, we want the services of this single host
if len(eq_filters) == len([f for f in self.structured_data if (f[0] == 'Filter' and f[1] == 'host_name')]):
self.query_hints['target'] = HINT_SERVICES_BY_HOST
self.query_hints['host_name'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[2] == '=')][0]
# this helps: multisite_host_detail
elif eq_filters == ['description', 'host_name']:
# We want one specific service
self.query_hints['target'] = HINT_SERVICE
self.query_hints['host_name'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[1] == 'host_name' and f[2] == '=')][0]
self.query_hints['service_description'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[1] == 'description' and f[2] == '=')][0]
# this helps: multisite_service_detail, thruk_service_detail, nagvis_service_icon
elif unique_eq_filters == ['host_name']:
# we want a lot of services selected by
# Filter: host_name
# Filter: host_name
# ...
# Or: n
hosts = []
only_hosts = True
try:
num_hosts = 0
for i, _ in enumerate(self.structured_data):
if self.structured_data[i][0] == 'Filter' and self.structured_data[i][1] == 'host_name':
if self.structured_data[i+1][0] == 'Filter' and self.structured_data[i+1][1] == 'host_name':
num_hosts += 1
hosts.append(self.structured_data[i][3])
elif self.structured_data[i+1][0] == 'Or' and self.structured_data[i+1][1] == num_hosts + 1:
num_hosts += 1
hosts.append(self.structured_data[i][3])
only_hosts = True
else:
only_hosts = False
except Exception, exp:
only_hosts = False
if only_hosts:
if len(hosts) == len(filter(lambda x: x[0] == 'Filter' and x[1] == 'host_name', self.structured_data)):
hosts = list(set(hosts))
hosts.sort()
self.query_hints['target'] = HINT_SERVICES_BY_HOSTS
self.query_hints['host_name'] = hosts
# this helps: nagvis host icons
elif unique_eq_filters == ['description', 'host_name']:
# we want a lot of services selected by
# Filter: host_name
# Filter: service_description
# And: 2
services = []
only_services = True
try:
for i, _ in enumerate(self.structured_data):
if self.structured_data[i][0] == 'Filter' and self.structured_data[i][1] == 'host_name':
if self.structured_data[i+1][0] == 'Filter' and self.structured_data[i+1][1] == 'description' and self.structured_data[i+2][0] == 'And' and self.structured_data[i+2][1] == 2:
services.append((self.structured_data[i][3], self.structured_data[i+1][3]))
elif self.structured_data[i-1][0] == 'Filter' and self.structured_data[i-1][1] == 'description' and self.structured_data[i+1][0] == 'And' and self.structured_data[i+1][1] == 2:
services.append((self.structured_data[i][3], self.structured_data[i-1][3]))
else:
only_services = False
break
except Exception, exp:
only_services = False
if only_services:
if len(services) == len(filter(lambda x: x[0] == 'Filter' and x[1] == 'description', self.structured_data)):
#len([None for stmt in self.structured_data if stmt[0] == 'Filter' and stmt[1] == 'description']):
services = set(services)
hosts = set([svc[0] for svc in services])
# num_hosts < num_services / 2
# hint : hosts_names
if len(hosts) == 1:
self.query_hints['target'] = HINT_SERVICES_BY_HOST
self.query_hints['host_name'] = hosts.pop()
else:
self.query_hints['target'] = HINT_SERVICES
self.query_hints['host_names_service_descriptions'] = services
elif ge_contains_filters == ['groups']:
# we want all the services in a servicegroup
logger.debug("[Livestatus Query Metainfo] structure_date: %s" % str(self.structured_data))
if len(ge_contains_filters) == len([f for f in self.structured_data if (f[0] == 'Filter' and (f[1] == 'groups' or f[1] == 'description'))]):
self.query_hints['target'] = HINT_SERVICES_BY_GROUP
self.query_hints['servicegroup_name'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[2] == '>=')][0]
# this helps: nagvis servicegroup
elif ge_contains_filters == ['host_groups']:
# we want the services of all the hosts in a hostgroup
pass
# Do we have exactly 1 Filter, which is 'host_groups'?
# In this case, we want the services of all the hosts in this hostgroup
if len(ge_contains_filters) == len([f for f in self.structured_data if (f[0] == 'Filter' and f[1].startswith('host'))]):
self.query_hints['target'] = HINT_SERVICES_BY_HOSTGROUP
self.query_hints['hostgroup_name'] = [f[3] for f in self.structured_data if (f[0] == 'Filter' and f[2] == '>=')][0]
# this helps: nagvis hostgroup
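# Illustrative example (assumed query, not from the original module): a query
# such as
# GET services
# Columns: host_name description state
# Filter: host_name = test_host_005
# takes the 'services' branch above with eq_filters == ['host_name'], so the
# hints become query_hints['target'] = HINT_SERVICES_BY_HOST and
# query_hints['host_name'] = 'test_host_005'.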
| agpl-3.0 |
mgyenik/micropython | tests/float/float1.py | 16 | 1625 | # test basic float capabilities
# literals
print(.12)
print(1.)
print(1.2)
# float construction
print(float(1.2))
print(float("1.2"))
print(float("+1"))
print(float("1e1"))
print(float("1e+1"))
print(float("1e-1"))
print(float("inf"))
print(float("-inf"))
print(float("INF"))
print(float("infinity"))
print(float("INFINITY"))
print(float("nan"))
print(float("NaN"))
try:
float("1e+")
except ValueError:
print("ValueError")
try:
float("1z")
except ValueError:
print("ValueError")
# unary operators
print(bool(0.0))
print(bool(1.2))
print(+(1.2))
print(-(1.2))
# division of integers
x = 1 / 2
print(x)
# /= operator
a = 1
a /= 2
print(a)
# floor division
print(1.0 // 2)
print(2.0 // 2)
# comparison
print(1.2 <= 3.4)
print(1.2 <= -3.4)
print(1.2 >= 3.4)
print(1.2 >= -3.4)
try:
1.0 / 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1.0 // 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1.2 % 0
except ZeroDivisionError:
print("ZeroDivisionError")
# unsupported unary ops
try:
~1.2
except TypeError:
print("TypeError")
try:
1.2 in 3.4
except TypeError:
print("TypeError")
# can't convert list to float
try:
float([])
except TypeError:
print("TypeError")
# test constant float with more than 255 chars
x = 1.84728699436059052516398251149631771898472869943605905251639825114963177189847286994360590525163982511496317718984728699436059052516398251149631771898472869943605905251639825114963177189847286994360590525163982511496317718984728699436059052516398251149631771898472869943605905251639825114963177189
print("%.5f" % x)
| mit |
angelman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/host.py | 119 | 4515 | # Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import sys
from webkitpy.common.checkout import Checkout
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.memoized import memoized
from webkitpy.common.net import bugzilla, buildbot, web
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.watchlist.watchlistparser import WatchListParser
from webkitpy.port.factory import PortFactory
_log = logging.getLogger(__name__)
class Host(SystemHost):
def __init__(self):
SystemHost.__init__(self)
self.web = web.Web()
# FIXME: Checkout should own the scm object.
self._scm = None
self._checkout = None
# Everything below this line is WebKit-specific and belongs on a higher-level object.
self.bugs = bugzilla.Bugzilla()
self.buildbot = buildbot.BuildBot()
# FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
# In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
# so for now we just pass along the whole Host object.
# FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
self.port_factory = PortFactory(self)
self._engage_awesome_locale_hacks()
# We call this from the Host constructor, as it's one of the
# earliest calls made for all webkitpy-based programs.
def _engage_awesome_locale_hacks(self):
# To make life easier on our non-english users, we override
# the locale environment variables inside webkitpy.
# If we don't do this, programs like SVN will output localized
# messages and svn.py will fail to parse them.
# FIXME: We should do these overrides *only* for the subprocesses we know need them!
# This hack only works in unix environments.
os.environ['LANGUAGE'] = 'en'
os.environ['LANG'] = 'en_US.UTF-8'
os.environ['LC_MESSAGES'] = 'en_US.UTF-8'
os.environ['LC_ALL'] = ''
def initialize_scm(self, patch_directories=None):
detector = SCMDetector(self.filesystem, self.executive)
self._scm = detector.default_scm(patch_directories)
self._checkout = Checkout(self.scm())
def scm(self):
return self._scm
def checkout(self):
return self._checkout
@memoized
def watch_list(self):
config_path = self.filesystem.dirname(self.filesystem.path_to_module('webkitpy.common.config'))
watch_list_full_path = self.filesystem.join(config_path, 'watchlist')
if not self.filesystem.exists(watch_list_full_path):
raise Exception('Watch list file (%s) not found.' % watch_list_full_path)
watch_list_contents = self.filesystem.read_text_file(watch_list_full_path)
return WatchListParser().parse(watch_list_contents)
| bsd-3-clause |
f2um2326/LogisticRegression | LogisticReg_myfit.py | 1 | 3358 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 16 14:40:08 2015
@author: shimba
"""
# print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn import linear_model
from sklearn import datasets
from sklearn import cross_validation
from sklearn import grid_search
class LogisticRegression(BaseEstimator, RegressorMixin):
def __init__(self, penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.fit_intercept = fit_intercept
self.C = C
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
def phi(self, x, y):
return np.array([x, y, 1])
def sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
def fit(self, X, y):
self.w = np.random.randn(3) # initialize parameter
eta = 0.1
# the gradient of error function
numFeature = X.shape[0]  # number of training samples (rows of X)
for n in xrange(50):
indices = range(numFeature)
np.random.shuffle(indices)
for i in indices:
t_n = y[i]
x_i, y_i = X[i, :]
feature = self.phi(x_i, y_i)
#print u"%d, %d" % (i, t_n)
predict = self.sigmoid(np.inner(self.w, feature))
self.w -= eta * (predict - t_n) * feature
#print "%f, %f" % (w[0]/w[2], w[1]/w[2])
#print self.w
#gradient_E = np.dot(feature.T, Y-T)
eta *= 0.9
#print eta
#gradient_E = np.dot(feature.T, Y-T)
return self
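# Illustrative note (not part of the original class): the loop above is
# per-sample stochastic gradient descent on the logistic loss, i.e. for every
# training point it applies
# w <- w - eta * (sigmoid(w . phi(x1, x2)) - t) * phi(x1, x2)
# A minimal usage sketch, assuming 2-D inputs X and 0/1 labels y:
# clf = LogisticRegression()
# clf.fit(X, y)
# prob = clf.sigmoid(np.inner(clf.w, clf.phi(x1, x2))) # P(t=1 | x1, x2)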
def LogisticRegression_main():
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# parameter?
# cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
#clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
#clf.fit(X, y)
regressor = LogisticRegression()
regressor.fit(X, y)
# plot result
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
#Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
#Z = regressor.predict(np.c_[xx.ravel(), yy.ravel()])
#Z = Z.reshape(xx.shape)
#plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
w = regressor.w
# preparation for drawing the figure (grid over the input space)
seq = np.arange(1, 8, 0.01)
xlist, ylist = np.meshgrid(seq, seq)
zlist = [regressor.sigmoid(np.inner(w, regressor.phi(a, b))) for a, b in zip(xlist, ylist)]
# draw the scatter plot and the predicted probability distribution
plt.imshow(zlist, extent=[1,8,1,8], origin='lower', cmap=plt.cm.PiYG_r)
plt.plot(X[y==1,0], X[y==1,1], 'o', color='red')
plt.plot(X[y==0,0], X[y==0,1], 'o', color='blue')
plt.show()
if __name__ == '__main__':
LogisticRegression_main() | mit |
JustinWingChungHui/MyFamilyRoot | auth_api/tests.py | 2 | 5613 | from django.test import TestCase
from django.test.utils import override_settings
from rest_framework import status
from rest_framework.test import APIClient
from axes.signals import user_locked_out
import json
import time
from family_tree.models.family import Family
from family_tree.models.person import Person
from custom_user.models import User
@override_settings(SECURE_SSL_REDIRECT=False, AXES_BEHIND_REVERSE_PROXY=False)
class JWTAuthTest(TestCase):
'''
Tests JWT auth
'''
def setUp(self):
self.family = Family()
self.family.save()
self.user = User.objects.create_user(email='[email protected]',
password='compiler',
name='Grace Hopper',
family_id = self.family.id)
self.person = Person(name='Grace Hopper',
gender='F',
email='[email protected]',
family_id=self.family.id,
language='en',
user_id=self.user.id)
self.person.save()
def test_jwt_auth_and_refresh_token_created_on_correct_auth_details(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
auth_details = {
'email': '[email protected]',
'password': 'compiler'
}
response = client.post('/api/auth/obtain_token/', auth_details, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
access_token = json.loads(response.content)["access"]
refresh_token = json.loads(response.content)["refresh"]
auth_token = {
'refresh': refresh_token
}
# Sleep to ensure new token is different
time.sleep(1)
refresh_response = client.post('/api/auth/refresh_token/', auth_token, format='json')
refresh_token = json.loads(refresh_response.content)["access"]
self.assertEqual(refresh_response.status_code, status.HTTP_200_OK)
self.assertNotEqual(refresh_token, access_token)
# Check verify token
new_auth_token = {
'token': refresh_token
}
verify_new_token_response = client.post('/api/auth/verify_token/', new_auth_token, format='json')
self.assertEqual(verify_new_token_response.status_code, status.HTTP_200_OK)
# Check ip not locked
locked_response = client.get('/api/auth/is_locked/', format='json')
self.assertEqual(b'false', locked_response.content)
self.assertEqual(locked_response.status_code, status.HTTP_200_OK)
def test_jwt_fails_on_auth_incorrect_password(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
payload = {
'email': '[email protected]',
'password': 'COBOL'
}
response = client.post('/api/auth/obtain_token/', payload, format='json')
self.assertNotEqual(response.status_code, status.HTTP_200_OK)
def test_verify_fails_on_invalid_token(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
invalid_auth_token = {
'token': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImp0aSI6IjM1ODU0ODc3LWQyZjQtNDIxZS04ZDI5LWY3YTgxNTk3NzdhYyIsImlhdCI6MTU1NDM4NzU4NCwiZXhwIjoxNTU0MzkxMTg0fQ.yIr0TMbalatx7alU1TMGIxxaelqquMJfz3m4H7AA9v4'
}
verify_old_token_response = client.post('/api/auth/verify_token/', invalid_auth_token, format='json')
self.assertNotEqual(verify_old_token_response.status_code, status.HTTP_200_OK)
def test_account_locks_out_on_multiple_invalid_login_attempts(self):
user = User.objects.create_user(email='[email protected]',
password='smalltalk',
name='Adele Goldberg',
family_id = self.family.id)
person = Person(name='Adele Goldberg',
gender='F',
email='[email protected]',
family_id=self.family.id,
language='en',
user_id=user.id)
person.save()
# 127.0.0.1 is whitelisted
client = APIClient(HTTP_X_REAL_IP='127.0.0.2')
wrong_auth_details = {
'email': '[email protected]',
'password': 'compiler'
}
for x in range(0, 6):
response = client.post('/api/auth/obtain_token/', wrong_auth_details, format='json')
correct_auth_details = {
'email': '[email protected]',
'password': 'smalltalk'
}
final_response = client.post('/api/auth/obtain_token/', correct_auth_details, format='json')
self.assertNotEqual(final_response.status_code, status.HTTP_200_OK)
# Check ip locked
locked_response = client.get('/api/auth/is_locked/', format='json')
self.assertNotEqual(b'false', locked_response.content)
def test_api_docs_loads(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
client.force_authenticate(user=self.user)
response = client.get('/api/docs/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_schema_loads(self):
client = APIClient(HTTP_X_REAL_IP='127.0.0.1')
client.force_authenticate(user=self.user)
response = client.get('/api/schema/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
| gpl-2.0 |
pombredanne/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Sig.py | 61 | 2358 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Sig.py 5134 2010/08/16 23:02:40 bdeegan"
__doc__ = """Place-holder for the old SCons.Sig module hierarchy
This is no longer used, but code out there (such as the NSIS module on
the SCons wiki) may try to import SCons.Sig. If so, we generate a warning
that points them to the line that caused the import, and don't die.
If someone actually tried to use the sub-modules or functions within
the package (for example, SCons.Sig.MD5.signature()), then they'll still
get an AttributeError, but at least they'll know where to start looking.
"""
import SCons.Util
import SCons.Warnings
msg = 'The SCons.Sig module no longer exists.\n' \
' Remove the following "import SCons.Sig" line to eliminate this warning:'
SCons.Warnings.warn(SCons.Warnings.DeprecatedSigModuleWarning, msg)
default_calc = None
default_module = None
class MD5Null(SCons.Util.Null):
def __repr__(self):
return "MD5Null()"
class TimeStampNull(SCons.Util.Null):
def __repr__(self):
return "TimeStampNull()"
MD5 = MD5Null()
TimeStamp = TimeStampNull()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
migonzalvar/youtube-dl | youtube_dl/extractor/comcarcoff.py | 85 | 2219 | # encoding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import parse_iso8601
class ComCarCoffIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?comediansincarsgettingcoffee\.com/(?P<id>[a-z0-9\-]*)'
_TESTS = [{
'url': 'http://comediansincarsgettingcoffee.com/miranda-sings-happy-thanksgiving-miranda/',
'info_dict': {
'id': 'miranda-sings-happy-thanksgiving-miranda',
'ext': 'mp4',
'upload_date': '20141127',
'timestamp': 1417107600,
'title': 'Happy Thanksgiving Miranda',
'description': 'Jerry Seinfeld and his special guest Miranda Sings cruise around town in search of coffee, complaining and apologizing along the way.',
'thumbnail': 'http://ccc.crackle.com/images/s5e4_thumb.jpg',
},
'params': {
'skip_download': 'requires ffmpeg',
}
}]
def _real_extract(self, url):
display_id = self._match_id(url)
if not display_id:
display_id = 'comediansincarsgettingcoffee.com'
webpage = self._download_webpage(url, display_id)
full_data = json.loads(self._search_regex(
r'<script type="application/json" id="videoData">(?P<json>.+?)</script>',
webpage, 'full data json'))
video_id = full_data['activeVideo']['video']
video_data = full_data.get('videos', {}).get(video_id) or full_data['singleshots'][video_id]
thumbnails = [{
'url': video_data['images']['thumb'],
}, {
'url': video_data['images']['poster'],
}]
formats = self._extract_m3u8_formats(
video_data['mediaUrl'], video_id, ext='mp4')
return {
'id': video_id,
'display_id': display_id,
'title': video_data['title'],
'description': video_data.get('description'),
'timestamp': parse_iso8601(video_data.get('pubDate')),
'thumbnails': thumbnails,
'formats': formats,
'webpage_url': 'http://comediansincarsgettingcoffee.com/%s' % (video_data.get('urlSlug', video_data.get('slug'))),
}
| unlicense |
kost/volatility | volatility/plugins/pooltracker.py | 8 | 9708 | # Volatility
# Copyright (C) Michael Ligh <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
import volatility.plugins.common as common
import volatility.utils as utils
import volatility.win32.tasks as tasks
import volatility.obj as obj
import volatility.debug as debug
import volatility.poolscan as poolscan
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
#--------------------------------------------------------------------------------
# Profile Modifications
#--------------------------------------------------------------------------------
class PoolTrackTagOverlay(obj.ProfileModification):
"""Overlays for pool trackers"""
conditions = {'os': lambda x: x == 'windows'}
def modification(self, profile):
profile.merge_overlay({
'_POOL_TRACKER_TABLE': [ None, {
'Key': [ None, ['String', dict(length = 4)]]
}],
})
#--------------------------------------------------------------------------------
# PoolTracker Plugin
#--------------------------------------------------------------------------------
class PoolTracker(common.AbstractWindowsCommand):
"""Show a summary of pool tag usage"""
def __init__(self, config, *args, **kwargs):
common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
config.add_option('TAGS', short_option = 't', help = 'Pool tag to find')
config.add_option('TAGFILE', short_option = 'T',
help = 'Pool tag file (pooltag.txt)', default = None)
config.add_option('WHITELIST', short_option = 'W',
help = 'Apply whitelist (only show third party tags)',
default = False, action = "store_true")
config.add_option('SHOW-FREE', short_option = 'F',
help = 'Show tags with no allocations',
default = False, action = "store_true")
@staticmethod
def is_valid_profile(profile):
return (profile.metadata.get('os', 'unknown') == 'windows' and
profile.metadata.get('major', 0) == 6)
def calculate(self):
kernel_space = utils.load_as(self._config)
if not self.is_valid_profile(kernel_space.profile):
debug.error("Windows XP/2003 does not track pool tags")
knowntags = {}
if self._config.TAGFILE and os.path.isfile(self._config.TAGFILE):
taglines = open(self._config.TAGFILE).readlines()
for tag in taglines:
tag = tag.strip()
if tag.startswith("rem") or tag.startswith(" ") or tag == "":
continue
info = tag.split("-", 2)
try:
key = info[0].strip()
driver = info[1].strip()
reason = info[2].strip()
except IndexError:
continue
knowntags[key] = (driver, reason)
track_table = tasks.get_kdbg(kernel_space).PoolTrackTable
# not really an address, this is just a trick to get
# a 32bit number on x86 and 64bit number on x64. the
# size is always directly before the pool table.
table_size = obj.Object("address", offset =
track_table - kernel_space.profile.get_obj_size("address"),
vm = kernel_space
)
track_table = track_table.dereference_as("address")
entries = obj.Object("Array", targetType = "_POOL_TRACKER_TABLE",
offset = track_table, count = table_size,
vm = kernel_space
)
if self._config.TAGS:
tags = [tag for tag in self._config.TAGS.split(",")]
else:
tags = []
for entry in entries:
if not self._config.SHOW_FREE:
if entry.PagedBytes == 0 and entry.NonPagedBytes == 0:
continue
if not tags or entry.Key in tags:
try:
(driver, reason) = knowntags[str(entry.Key).strip()]
if self._config.WHITELIST:
continue
except KeyError:
(driver, reason) = ("", "")
yield entry, driver, reason
def render_whitelist(self, outfd, data):
for entry, driver, reason in data:
if str(entry.Key) == "":
continue
outfd.write("{0} - {1} - {2}\n".format(entry.Key, driver, reason))
def render_text(self, outfd, data):
self.table_header(outfd, [("Tag", "6"),
("NpAllocs", "8"),
("NpFrees", "8"),
("NpBytes", "8"),
("PgAllocs", "8"),
("PgFrees", "8"),
("PgBytes", "8"),
("Driver", "20"),
("Reason", "")])
for entry, driver, reason in data:
if str(entry.Key) == "":
continue
self.table_row(outfd, entry.Key, entry.NonPagedAllocs,
entry.NonPagedFrees, entry.NonPagedBytes, entry.PagedAllocs,
entry.PagedFrees, entry.PagedBytes,
driver, reason)
def unified_output(self, data):
return TreeGrid([("Tag", str),
("NpAllocs", int),
("NpFrees", int),
("NpBytes", int),
("PgAllocs", int),
("PgFrees", int),
("PgBytes", int),
("Driver", str),
("Reason", str)],
self.generator(data))
def generator(self, data):
for entry, driver, reason in data:
if str(entry.Key) == "":
continue
yield (0, [str(entry.Key),
int(entry.NonPagedAllocs),
int(entry.NonPagedFrees),
int(entry.NonPagedBytes),
int(entry.PagedAllocs),
int(entry.PagedFrees),
int(entry.PagedBytes),
str(driver),
str(reason)])
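# Illustrative note (assumed file contents, not part of this plugin): the
# --tagfile option expects Microsoft's pooltag.txt format, e.g. lines like
# AfdB - afd.sys - Afd buffer allocations
# Irp - <unknown> - Io, IRP packets
# rem lines, indented lines and blank lines are skipped by calculate() above.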
#--------------------------------------------------------------------------------
# Configurable PoolScanner Plugin
#--------------------------------------------------------------------------------
class GenericPoolScan(poolscan.SinglePoolScanner):
"""Configurable pool scanner"""
class PoolPeek(common.AbstractWindowsCommand):
"""Configurable pool scanner plugin"""
def __init__(self, config, *args, **kwargs):
common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
config.add_option('TAG', short_option = 't',
help = 'Pool tag to find')
config.add_option('MIN-SIZE', short_option = 'm',
type = 'int',
help = 'Minimum size of the pool to find (default: 0)',
default = 0)
config.add_option('MAX-SIZE', short_option = 'M',
type = 'int',
help = 'Maximum size of the pool to find (default: 4096)',
default = 4096)
config.add_option('PAGED', short_option = 'P',
help = 'Search in paged pools (default: False)',
default = False, action = "store_true")
def calculate(self):
addr_space = utils.load_as(self._config)
tag = self._config.TAG
if tag == None:
debug.error("You must enter a --tag to find")
minsize = self._config.MIN_SIZE
maxsize = self._config.MAX_SIZE
poolsize = lambda x : x >= minsize and x <= maxsize
if self._config.PAGED:
paged = True
non_paged = False
else:
paged = False
non_paged = True
scanner = GenericPoolScan()
scanner.checks = [
('PoolTagCheck', dict(tag = tag)),
('CheckPoolSize', dict(condition = poolsize)),
('CheckPoolType', dict(paged = paged, non_paged = non_paged)),
]
for offset in scanner.scan(addr_space):
pool = obj.Object("_POOL_HEADER", offset = offset, vm = addr_space)
buf = addr_space.zread(offset, minsize)
yield pool, buf
def render_text(self, outfd, data):
for pool, buf in data:
pool_alignment = obj.VolMagic(pool.obj_vm).PoolAlignment.v()
outfd.write("Pool Header: {0:#x}, Size: {1}\n".format(
pool.obj_offset,
pool.BlockSize * pool_alignment))
outfd.write("{0}\n".format("\n".join(
["{0:#010x} {1:<48} {2}".format(pool.obj_offset + o, h, ''.join(c))
for o, h, c in utils.Hexdump(buf)
])))
outfd.write("\n")
| gpl-2.0 |
eliangidoni/rethinkdb | test/common/http_support/werkzeug/testsuite/test.py | 145 | 18933 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.test
~~~~~~~~~~~~~~~~~~~~~~~
Tests the testing tools.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
import unittest
from io import BytesIO
from werkzeug._compat import iteritems, to_bytes
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import Request, Response, BaseResponse
from werkzeug.test import Client, EnvironBuilder, create_environ, \
ClientRedirectError, stream_encode_multipart, run_wsgi_app
from werkzeug.utils import redirect
from werkzeug.formparser import parse_form_data
from werkzeug.datastructures import MultiDict, FileStorage
def cookie_app(environ, start_response):
"""A WSGI application which sets a cookie, and returns as a ersponse any
cookie which exists.
"""
response = Response(environ.get('HTTP_COOKIE', 'No Cookie'),
mimetype='text/plain')
response.set_cookie('test', 'test')
return response(environ, start_response)
def redirect_loop_app(environ, start_response):
response = redirect('http://localhost/some/redirect/')
return response(environ, start_response)
def redirect_with_get_app(environ, start_response):
req = Request(environ)
if req.url not in ('http://localhost/',
'http://localhost/first/request',
'http://localhost/some/redirect/'):
assert False, 'redirect_demo_app() did not expect URL "%s"' % req.url
if '/some/redirect' not in req.url:
response = redirect('http://localhost/some/redirect/')
else:
response = Response('current url: %s' % req.url)
return response(environ, start_response)
def redirect_with_post_app(environ, start_response):
req = Request(environ)
if req.url == 'http://localhost/some/redirect/':
assert req.method == 'GET', 'request should be GET'
assert not req.form, 'request should not have data'
response = Response('current url: %s' % req.url)
else:
response = redirect('http://localhost/some/redirect/')
return response(environ, start_response)
def external_redirect_demo_app(environ, start_response):
response = redirect('http://example.com/')
return response(environ, start_response)
def external_subdomain_redirect_demo_app(environ, start_response):
if 'test.example.com' in environ['HTTP_HOST']:
response = Response('redirected successfully to subdomain')
else:
response = redirect('http://test.example.com/login')
return response(environ, start_response)
def multi_value_post_app(environ, start_response):
req = Request(environ)
assert req.form['field'] == 'val1', req.form['field']
assert req.form.getlist('field') == ['val1', 'val2'], req.form.getlist('field')
response = Response('ok')
return response(environ, start_response)
class TestTestCase(WerkzeugTestCase):
def test_cookie_forging(self):
c = Client(cookie_app)
c.set_cookie('localhost', 'foo', 'bar')
appiter, code, headers = c.open()
self.assert_strict_equal(list(appiter), [b'foo=bar'])
def test_set_cookie_app(self):
c = Client(cookie_app)
appiter, code, headers = c.open()
self.assert_in('Set-Cookie', dict(headers))
def test_cookiejar_stores_cookie(self):
c = Client(cookie_app)
appiter, code, headers = c.open()
self.assert_in('test', c.cookie_jar._cookies['localhost.local']['/'])
def test_no_initial_cookie(self):
c = Client(cookie_app)
appiter, code, headers = c.open()
self.assert_strict_equal(b''.join(appiter), b'No Cookie')
def test_resent_cookie(self):
c = Client(cookie_app)
c.open()
appiter, code, headers = c.open()
self.assert_strict_equal(b''.join(appiter), b'test=test')
def test_disable_cookies(self):
c = Client(cookie_app, use_cookies=False)
c.open()
appiter, code, headers = c.open()
self.assert_strict_equal(b''.join(appiter), b'No Cookie')
def test_cookie_for_different_path(self):
c = Client(cookie_app)
c.open('/path1')
appiter, code, headers = c.open('/path2')
self.assert_strict_equal(b''.join(appiter), b'test=test')
def test_environ_builder_basics(self):
b = EnvironBuilder()
self.assert_is_none(b.content_type)
b.method = 'POST'
self.assert_equal(b.content_type, 'application/x-www-form-urlencoded')
b.files.add_file('test', BytesIO(b'test contents'), 'test.txt')
self.assert_equal(b.files['test'].content_type, 'text/plain')
self.assert_equal(b.content_type, 'multipart/form-data')
b.form['test'] = 'normal value'
req = b.get_request()
b.close()
self.assert_strict_equal(req.url, u'http://localhost/')
self.assert_strict_equal(req.method, 'POST')
self.assert_strict_equal(req.form['test'], u'normal value')
self.assert_equal(req.files['test'].content_type, 'text/plain')
self.assert_strict_equal(req.files['test'].filename, u'test.txt')
self.assert_strict_equal(req.files['test'].read(), b'test contents')
def test_environ_builder_headers(self):
b = EnvironBuilder(environ_base={'HTTP_USER_AGENT': 'Foo/0.1'},
environ_overrides={'wsgi.version': (1, 1)})
b.headers['X-Suck-My-Dick'] = 'very well sir'
env = b.get_environ()
self.assert_strict_equal(env['HTTP_USER_AGENT'], 'Foo/0.1')
self.assert_strict_equal(env['HTTP_X_SUCK_MY_DICK'], 'very well sir')
self.assert_strict_equal(env['wsgi.version'], (1, 1))
b.headers['User-Agent'] = 'Bar/1.0'
env = b.get_environ()
self.assert_strict_equal(env['HTTP_USER_AGENT'], 'Bar/1.0')
def test_environ_builder_headers_content_type(self):
b = EnvironBuilder(headers={'Content-Type': 'text/plain'})
env = b.get_environ()
self.assert_equal(env['CONTENT_TYPE'], 'text/plain')
b = EnvironBuilder(content_type='text/html',
headers={'Content-Type': 'text/plain'})
env = b.get_environ()
self.assert_equal(env['CONTENT_TYPE'], 'text/html')
def test_environ_builder_paths(self):
b = EnvironBuilder(path='/foo', base_url='http://example.com/')
self.assert_strict_equal(b.base_url, 'http://example.com/')
self.assert_strict_equal(b.path, '/foo')
self.assert_strict_equal(b.script_root, '')
self.assert_strict_equal(b.host, 'example.com')
b = EnvironBuilder(path='/foo', base_url='http://example.com/bar')
self.assert_strict_equal(b.base_url, 'http://example.com/bar/')
self.assert_strict_equal(b.path, '/foo')
self.assert_strict_equal(b.script_root, '/bar')
self.assert_strict_equal(b.host, 'example.com')
b.host = 'localhost'
self.assert_strict_equal(b.base_url, 'http://localhost/bar/')
b.base_url = 'http://localhost:8080/'
self.assert_strict_equal(b.host, 'localhost:8080')
self.assert_strict_equal(b.server_name, 'localhost')
self.assert_strict_equal(b.server_port, 8080)
b.host = 'foo.invalid'
b.url_scheme = 'https'
b.script_root = '/test'
env = b.get_environ()
self.assert_strict_equal(env['SERVER_NAME'], 'foo.invalid')
self.assert_strict_equal(env['SERVER_PORT'], '443')
self.assert_strict_equal(env['SCRIPT_NAME'], '/test')
self.assert_strict_equal(env['PATH_INFO'], '/foo')
self.assert_strict_equal(env['HTTP_HOST'], 'foo.invalid')
self.assert_strict_equal(env['wsgi.url_scheme'], 'https')
self.assert_strict_equal(b.base_url, 'https://foo.invalid/test/')
def test_environ_builder_content_type(self):
builder = EnvironBuilder()
self.assert_is_none(builder.content_type)
builder.method = 'POST'
self.assert_equal(builder.content_type, 'application/x-www-form-urlencoded')
builder.form['foo'] = 'bar'
self.assert_equal(builder.content_type, 'application/x-www-form-urlencoded')
builder.files.add_file('blafasel', BytesIO(b'foo'), 'test.txt')
self.assert_equal(builder.content_type, 'multipart/form-data')
req = builder.get_request()
self.assert_strict_equal(req.form['foo'], u'bar')
self.assert_strict_equal(req.files['blafasel'].read(), b'foo')
def test_environ_builder_stream_switch(self):
d = MultiDict(dict(foo=u'bar', blub=u'blah', hu=u'hum'))
for use_tempfile in False, True:
stream, length, boundary = stream_encode_multipart(
d, use_tempfile, threshold=150)
self.assert_true(isinstance(stream, BytesIO) != use_tempfile)
form = parse_form_data({'wsgi.input': stream, 'CONTENT_LENGTH': str(length),
'CONTENT_TYPE': 'multipart/form-data; boundary="%s"' %
boundary})[1]
self.assert_strict_equal(form, d)
stream.close()
def test_environ_builder_unicode_file_mix(self):
for use_tempfile in False, True:
f = FileStorage(BytesIO(u'\N{SNOWMAN}'.encode('utf-8')),
'snowman.txt')
d = MultiDict(dict(f=f, s=u'\N{SNOWMAN}'))
stream, length, boundary = stream_encode_multipart(
d, use_tempfile, threshold=150)
self.assert_true(isinstance(stream, BytesIO) != use_tempfile)
_, form, files = parse_form_data({
'wsgi.input': stream,
'CONTENT_LENGTH': str(length),
'CONTENT_TYPE': 'multipart/form-data; boundary="%s"' %
boundary
})
self.assert_strict_equal(form['s'], u'\N{SNOWMAN}')
self.assert_strict_equal(files['f'].name, 'f')
self.assert_strict_equal(files['f'].filename, u'snowman.txt')
self.assert_strict_equal(files['f'].read(),
u'\N{SNOWMAN}'.encode('utf-8'))
stream.close()
def test_create_environ(self):
env = create_environ('/foo?bar=baz', 'http://example.org/')
expected = {
'wsgi.multiprocess': False,
'wsgi.version': (1, 0),
'wsgi.run_once': False,
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.url_scheme': 'http',
'SCRIPT_NAME': '',
'CONTENT_TYPE': '',
'CONTENT_LENGTH': '0',
'SERVER_NAME': 'example.org',
'REQUEST_METHOD': 'GET',
'HTTP_HOST': 'example.org',
'PATH_INFO': '/foo',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'QUERY_STRING': 'bar=baz'
}
for key, value in iteritems(expected):
self.assert_equal(env[key], value)
self.assert_strict_equal(env['wsgi.input'].read(0), b'')
self.assert_strict_equal(create_environ('/foo', 'http://example.com/')['SCRIPT_NAME'], '')
def test_file_closing(self):
closed = []
class SpecialInput(object):
def read(self):
return ''
def close(self):
closed.append(self)
env = create_environ(data={'foo': SpecialInput()})
self.assert_strict_equal(len(closed), 1)
builder = EnvironBuilder()
builder.files.add_file('blah', SpecialInput())
builder.close()
self.assert_strict_equal(len(closed), 2)
def test_follow_redirect(self):
env = create_environ('/', base_url='http://localhost')
c = Client(redirect_with_get_app)
appiter, code, headers = c.open(environ_overrides=env, follow_redirects=True)
self.assert_strict_equal(code, '200 OK')
self.assert_strict_equal(b''.join(appiter), b'current url: http://localhost/some/redirect/')
# Test that the :cls:`Client` is aware of user defined response wrappers
c = Client(redirect_with_get_app, response_wrapper=BaseResponse)
resp = c.get('/', follow_redirects=True)
self.assert_strict_equal(resp.status_code, 200)
self.assert_strict_equal(resp.data, b'current url: http://localhost/some/redirect/')
# test with URL other than '/' to make sure redirected URL's are correct
c = Client(redirect_with_get_app, response_wrapper=BaseResponse)
resp = c.get('/first/request', follow_redirects=True)
self.assert_strict_equal(resp.status_code, 200)
self.assert_strict_equal(resp.data, b'current url: http://localhost/some/redirect/')
def test_follow_external_redirect(self):
env = create_environ('/', base_url='http://localhost')
c = Client(external_redirect_demo_app)
self.assert_raises(RuntimeError, lambda:
c.get(environ_overrides=env, follow_redirects=True))
def test_follow_external_redirect_on_same_subdomain(self):
env = create_environ('/', base_url='http://example.com')
c = Client(external_subdomain_redirect_demo_app, allow_subdomain_redirects=True)
c.get(environ_overrides=env, follow_redirects=True)
# check that this does not work for real external domains
env = create_environ('/', base_url='http://localhost')
self.assert_raises(RuntimeError, lambda:
c.get(environ_overrides=env, follow_redirects=True))
# check that subdomain redirects fail if no `allow_subdomain_redirects` is applied
c = Client(external_subdomain_redirect_demo_app)
self.assert_raises(RuntimeError, lambda:
c.get(environ_overrides=env, follow_redirects=True))
def test_follow_redirect_loop(self):
c = Client(redirect_loop_app, response_wrapper=BaseResponse)
with self.assert_raises(ClientRedirectError):
resp = c.get('/', follow_redirects=True)
def test_follow_redirect_with_post(self):
c = Client(redirect_with_post_app, response_wrapper=BaseResponse)
resp = c.post('/', follow_redirects=True, data='foo=blub+hehe&blah=42')
self.assert_strict_equal(resp.status_code, 200)
self.assert_strict_equal(resp.data, b'current url: http://localhost/some/redirect/')
def test_path_info_script_name_unquoting(self):
def test_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return [environ['PATH_INFO'] + '\n' + environ['SCRIPT_NAME']]
c = Client(test_app, response_wrapper=BaseResponse)
resp = c.get('/foo%40bar')
self.assert_strict_equal(resp.data, b'/foo@bar\n')
c = Client(test_app, response_wrapper=BaseResponse)
resp = c.get('/foo%40bar', 'http://localhost/bar%40baz')
self.assert_strict_equal(resp.data, b'/foo@bar\n/bar@baz')
def test_multi_value_submit(self):
c = Client(multi_value_post_app, response_wrapper=BaseResponse)
data = {
'field': ['val1','val2']
}
resp = c.post('/', data=data)
self.assert_strict_equal(resp.status_code, 200)
c = Client(multi_value_post_app, response_wrapper=BaseResponse)
data = MultiDict({
'field': ['val1', 'val2']
})
resp = c.post('/', data=data)
self.assert_strict_equal(resp.status_code, 200)
def test_iri_support(self):
b = EnvironBuilder(u'/föö-bar', base_url=u'http://☃.net/')
self.assert_strict_equal(b.path, '/f%C3%B6%C3%B6-bar')
self.assert_strict_equal(b.base_url, 'http://xn--n3h.net/')
def test_run_wsgi_apps(self):
def simple_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return ['Hello World!']
app_iter, status, headers = run_wsgi_app(simple_app, {})
self.assert_strict_equal(status, '200 OK')
self.assert_strict_equal(list(headers), [('Content-Type', 'text/html')])
self.assert_strict_equal(app_iter, ['Hello World!'])
def yielding_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
yield 'Hello '
yield 'World!'
app_iter, status, headers = run_wsgi_app(yielding_app, {})
self.assert_strict_equal(status, '200 OK')
self.assert_strict_equal(list(headers), [('Content-Type', 'text/html')])
self.assert_strict_equal(list(app_iter), ['Hello ', 'World!'])
def test_multiple_cookies(self):
@Request.application
def test_app(request):
response = Response(repr(sorted(request.cookies.items())))
response.set_cookie(u'test1', b'foo')
response.set_cookie(u'test2', b'bar')
return response
client = Client(test_app, Response)
resp = client.get('/')
self.assert_strict_equal(resp.data, b'[]')
resp = client.get('/')
self.assert_strict_equal(resp.data,
to_bytes(repr([('test1', u'foo'), ('test2', u'bar')]), 'ascii'))
def test_correct_open_invocation_on_redirect(self):
class MyClient(Client):
counter = 0
def open(self, *args, **kwargs):
self.counter += 1
env = kwargs.setdefault('environ_overrides', {})
env['werkzeug._foo'] = self.counter
return Client.open(self, *args, **kwargs)
@Request.application
def test_app(request):
return Response(str(request.environ['werkzeug._foo']))
c = MyClient(test_app, response_wrapper=Response)
self.assert_strict_equal(c.get('/').data, b'1')
self.assert_strict_equal(c.get('/').data, b'2')
self.assert_strict_equal(c.get('/').data, b'3')
def test_correct_encoding(self):
req = Request.from_values(u'/\N{SNOWMAN}', u'http://example.com/foo')
self.assert_strict_equal(req.script_root, u'/foo')
self.assert_strict_equal(req.path, u'/\N{SNOWMAN}')
def test_full_url_requests_with_args(self):
base = 'http://example.com/'
@Request.application
def test_app(request):
return Response(request.args['x'])
client = Client(test_app, Response)
resp = client.get('/?x=42', base)
self.assert_strict_equal(resp.data, b'42')
resp = client.get('http://www.example.com/?x=23', base)
self.assert_strict_equal(resp.data, b'23')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestTestCase))
return suite
| agpl-3.0 |
memtoko/django | tests/utils_tests/test_html.py | 10 | 8173 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from datetime import datetime
from unittest import TestCase
from django.test import ignore_warnings
from django.utils import html, safestring
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class TestUtilsHtml(TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '<&')
def test_format_html(self):
self.assertEqual(
html.format_html("{} {} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<p>See: 'é is an apostrophe followed by e acute</p>',
'See: 'é is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
)
for value, output in items:
self.check_output(f, value, output)
# Some convoluted syntax for which parsing may differ between python versions
output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
self.assertNotIn('<script>', output)
self.assertIn('test', output)
output = html.strip_tags('<script>alert()</script>&h')
self.assertNotIn('<script>', output)
self.assertIn('alert()', output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2+3&z='), 'http://example.com/?x=1&y=2+3&z=')
self.assertEqual(quote('http://example.com/?x=<>"\''), 'http://example.com/?x=%3C%3E%22%27')
self.assertEqual(quote('http://example.com/?q=http://example.com/?x=1%26q=django'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
self.assertEqual(quote('http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango'),
'http://example.com/?q=http%3A%2F%2Fexample.com%2F%3Fx%3D1%26q%3Ddjango')
def test_conditional_escape(self):
s = '<h1>interop</h1>'
self.assertEqual(html.conditional_escape(s),
'<h1>interop</h1>')
self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
| bsd-3-clause |
gdementen/PyTables | bench/postgres_backend.py | 13 | 5223 | from __future__ import print_function
import subprocess # Needs Python 2.4
from indexed_search import DB
import psycopg2 as db2
CLUSTER_NAME = "base"
DATA_DIR = "/scratch2/postgres/data/%s" % CLUSTER_NAME
#DATA_DIR = "/var/lib/pgsql/data/%s" % CLUSTER_NAME
DSN = "dbname=%s port=%s"
CREATE_DB = "createdb %s"
DROP_DB = "dropdb %s"
TABLE_NAME = "intsfloats"
PORT = 5432
class StreamChar(object):
"Object simulating a file for reading"
def __init__(self, db):
self.db = db
self.nrows = db.nrows
self.step = db.step
self.read_it = self.read_iter()
def values_generator(self):
j = 0
for i in range(self.nrows):
if i >= j * self.step:
stop = (j + 1) * self.step
if stop > self.nrows:
stop = self.nrows
arr_i4, arr_f8 = self.db.fill_arrays(i, stop)
j += 1
k = 0
yield (arr_i4[k], arr_i4[k], arr_f8[k], arr_f8[k])
k += 1
def read_iter(self):
sout = ""
n = self.nbytes
for tup in self.values_generator():
sout += "%s\t%s\t%s\t%s\n" % tup
if n is not None and len(sout) > n:
for i in range(n, len(sout), n):
rout = sout[:n]
sout = sout[n:]
yield rout
yield sout
def read(self, n=None):
self.nbytes = n
try:
str = next(self.read_it)
except StopIteration:
str = ""
return str
# required by postgres2 driver, but not used
def readline(self):
pass
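# Illustrative note (not part of the original benchmark): read() hands
# psycopg2 chunks of tab-separated rows, one line per record, e.g.
# "1\t1\t0.5\t0.5\n2\t2\t1.0\t1.0\n"
# which cursor.copy_from(StreamChar(db), TABLE_NAME) streams straight into
# the four-column table created below.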
class Postgres_DB(DB):
def __init__(self, nrows, rng, userandom):
DB.__init__(self, nrows, rng, userandom)
self.port = PORT
def flatten(self, l):
"""Flattens list of tuples l."""
return [x[0] for x in l]
# return map(lambda x: x[col], l)
# Overloads the method in DB class
def get_db_size(self):
sout = subprocess.Popen("sudo du -s %s" % DATA_DIR,
shell=True,
stdout=subprocess.PIPE).stdout
line = [l for l in sout][0]
return int(line.split()[0])
def open_db(self, remove=0):
if remove:
sout = subprocess.Popen(DROP_DB % self.filename, shell=True,
stdout=subprocess.PIPE).stdout
for line in sout:
print(line)
sout = subprocess.Popen(CREATE_DB % self.filename, shell=True,
stdout=subprocess.PIPE).stdout
for line in sout:
print(line)
print("Processing database:", self.filename)
con = db2.connect(DSN % (self.filename, self.port))
self.cur = con.cursor()
return con
def create_table(self, con):
self.cur.execute("""create table %s(
col1 integer,
col2 integer,
col3 double precision,
col4 double precision)""" % TABLE_NAME)
con.commit()
def fill_table(self, con):
st = StreamChar(self)
self.cur.copy_from(st, TABLE_NAME)
con.commit()
def index_col(self, con, colname, optlevel, idxtype, verbose):
self.cur.execute("create index %s on %s(%s)" %
(colname + '_idx', TABLE_NAME, colname))
con.commit()
def do_query_simple(self, con, column, base):
self.cur.execute(
"select sum(%s) from %s where %s >= %s and %s <= %s" %
(column, TABLE_NAME,
column, base + self.rng[0],
column, base + self.rng[1]))
# "select * from %s where %s >= %s and %s <= %s" % \
# (TABLE_NAME,
# column, base+self.rng[0],
# column, base+self.rng[1]))
#results = self.flatten(self.cur.fetchall())
results = self.cur.fetchall()
return results
def do_query(self, con, column, base, *unused):
d = (self.rng[1] - self.rng[0]) / 2.
inf1 = int(self.rng[0] + base)
sup1 = int(self.rng[0] + d + base)
inf2 = self.rng[0] + base * 2
sup2 = self.rng[0] + d + base * 2
# print "lims-->", inf1, inf2, sup1, sup2
condition = "((%s>=%s) and (%s<%s)) or ((col2>%s) and (col2<%s))"
#condition = "((col3>=%s) and (col3<%s)) or ((col1>%s) and (col1<%s))"
condition += " and ((col1+3.1*col2+col3*col4) > 3)"
#condition += " and (sqrt(col1^2+col2^2+col3^2+col4^2) > .1)"
condition = condition % (column, inf2, column, sup2, inf1, sup1)
# print "condition-->", condition
self.cur.execute(
# "select sum(%s) from %s where %s" %
"select %s from %s where %s" %
(column, TABLE_NAME, condition))
#results = self.flatten(self.cur.fetchall())
results = self.cur.fetchall()
#results = self.cur.fetchall()
# print "results-->", results
# return results
return len(results)
def close_db(self, con):
self.cur.close()
con.close()
| bsd-3-clause |
fedelemantuano/thug | thug/DOM/Plugin.py | 3 | 1536 | #!/usr/bin/env python
#
# Plugin.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
class Plugin(dict):
"""A dictionary with attribute-style access. It maps attribute access to
the real dictionary. """
def __init__(self, init = None):
if init is None:
init = dict()
dict.__init__(self, init)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(Plugin, self).__setitem__(key, value)
def __getitem__(self, name):
return super(Plugin, self).__getitem__(name)
def __delitem__(self, name):
return super(Plugin, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
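# Minimal illustration of the attribute/item mapping defined above (the plugin
# values here are hypothetical, not part of thug itself):
#   p = Plugin({'name': 'Shockwave Flash'})
#   p.version = '10.0.64'        # equivalent to p['version'] = '10.0.64'
#   assert p.name == p['name']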
| gpl-2.0 |
AnonymousBee/anonymousbee.github.io | tools/arbitrage/mb.py | 10 | 3129 | #!/usr/bin/env python
import sys
import urllib2
from time import sleep
import json
from arbitrator import BlinkTradeArbitrator
import datetime
import hmac
import hashlib
import ConfigParser
from ws4py.exc import HandshakeError
MB_API_KEY = 'XXXX'
MB_API_SECRET = 'YYYY'
def send_order_to_MB(sender, order):
nonce = datetime.datetime.now().strftime('%s')
message = 'sendorder' + str(MB_API_KEY) + str(nonce)
signature = hmac.new(MB_API_SECRET, msg=message, digestmod=hashlib.sha256).hexdigest().upper()
post_params = {
'key': MB_API_KEY,
'sign': signature,
'pair': 'btc_brl',
'volume': float(order['OrderQty']/1.e8),
'price': float( order['Price'] / 1.e8)
}
  if order['Side'] == '1':
post_params['type'] = 'buy'
  elif order['Side'] == '2':
post_params['type'] = 'sell'
print datetime.datetime.now(), 'POST https://www.mercadobitcoin.com.br/tapi/' + message, str(post_params)
def main():
candidates = ['arbitrage.ini', 'mb.ini' ]
if len(sys.argv) > 1:
candidates.append(sys.argv[1])
config = ConfigParser.SafeConfigParser({
'websocket_url': 'wss://127.0.0.1/trade/',
'username': '',
'password': '',
'buy_fee': 0,
'sell_fee': 0,
'api_key': 'KEY',
'api_secret': 'SECRET'
})
config.read( candidates )
websocket_url = config.get('mb', 'websocket_url')
username = config.get('mb', 'username')
password = config.get('mb', 'password')
buy_fee = int(config.get('mb', 'buy_fee'))
sell_fee = int(config.get('mb', 'sell_fee'))
api_key = config.get('mb', 'api_key')
api_secret = config.get('mb', 'api_secret')
broker_id = config.getint('mb', 'broker_id')
dest_market = config.get('mb', 'dest_market')
print 'websocket_url:', websocket_url
print 'username:', username
print 'buy_fee:', buy_fee
print 'sell_fee:', sell_fee
arbitrator = BlinkTradeArbitrator(broker_id, username,password,websocket_url, dest_market)
arbitrator.connect()
arbitrator.signal_order.connect(send_order_to_MB)
while True:
try:
sleep(5)
if arbitrator.is_connected():
arbitrator.send_testRequest()
else:
try:
arbitrator.reconnect()
except HandshakeError,e:
continue
try:
raw_data = urllib2.urlopen('https://www.mercadobitcoin.com.br/api/orderbook/').read()
except Exception:
print 'ERROR RETRIEVING ORDER BOOK'
continue
bids_asks = []
try:
bids_asks = json.loads(raw_data)
except Exception :
pass
if bids_asks:
ask_list = [ [ int(float(o[0]) * 1e8 * (1. + sell_fee) ) , int(o[1] * 1e8) ] for o in bids_asks['asks'] ]
bid_list = [ [ int(float(o[0]) * 1e8 * (1. + buy_fee) ) , int(o[1] * 1e8) ] for o in bids_asks['bids'] ]
arbitrator.process_ask_list(ask_list)
arbitrator.process_bid_list(bid_list)
except urllib2.URLError as e:
print datetime.datetime.now(), e
except KeyboardInterrupt:
arbitrator.cancel_all_orders()
print 'wait....'
sleep(5)
arbitrator.close()
break
main()
| gpl-3.0 |
alisidd/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/decisions_to_data_then_nn.py | 190 | 1889 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A model that places a decision tree embedding before a neural net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import decisions_to_data
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class DecisionsToDataThenNN(hybrid_model.HybridModel):
"""A model that places a decision tree embedding before a neural net."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(DecisionsToDataThenNN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [decisions_to_data.DecisionsToDataLayer(params,
0, device_assigner),
fully_connected.FullyConnectedLayer(
params, 1, device_assigner=device_assigner)]
| apache-2.0 |
HyperBaton/ansible | test/units/modules/network/check_point/test_cp_mgmt_threat_profile_facts.py | 19 | 2869 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_threat_profile_facts
OBJECT = {
"from": 1,
"to": 1,
"total": 6,
"objects": [
"53de74b7-8f19-4cbe-99fc-a81ef0759bad"
]
}
SHOW_PLURAL_PAYLOAD = {
'limit': 1,
'details_level': 'uid'
}
SHOW_SINGLE_PAYLOAD = {
'name': 'object_which_is_not_exist'
}
api_call_object = 'threat-profile'
api_call_object_plural_version = 'threat-profiles'
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointThreatProfileFacts(object):
module = cp_mgmt_threat_profile_facts
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(SHOW_SINGLE_PAYLOAD)
except Exception as e:
result = e.args[0]
assert result['failed']
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def test_show_few_objects(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(SHOW_PLURAL_PAYLOAD)
assert not result['changed']
assert OBJECT == result['ansible_facts'][api_call_object_plural_version]
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
| gpl-3.0 |
yuanming-hu/taichi | python/taichi/lang/ops.py | 1 | 13930 | import builtins
import ctypes
import functools
import math
import operator as _bt_ops_mod # bt for builtin
import traceback
from taichi.core.util import ti_core as _ti_core
from taichi.lang import impl, matrix
from taichi.lang.exception import TaichiSyntaxError
from taichi.lang.expr import Expr, make_expr_group
from taichi.lang.util import cook_dtype, is_taichi_class, taichi_scope
unary_ops = []
def stack_info():
s = traceback.extract_stack()[3:-1]
for i, l in enumerate(s):
if 'taichi_ast_generator' in l:
s = s[i + 1:]
break
raw = ''.join(traceback.format_list(s))
# remove the confusing last line
return '\n'.join(raw.split('\n')[:-5]) + '\n'
def is_taichi_expr(a):
return isinstance(a, Expr)
def wrap_if_not_expr(a):
_taichi_skip_traceback = 1
return Expr(a) if not is_taichi_expr(a) else a
def unary(foo):
@functools.wraps(foo)
def imp_foo(x):
_taichi_skip_traceback = 2
return foo(x)
@functools.wraps(foo)
def wrapped(a):
_taichi_skip_traceback = 1
if is_taichi_class(a):
return a.element_wise_unary(imp_foo)
else:
return imp_foo(a)
return wrapped
binary_ops = []
def binary(foo):
@functools.wraps(foo)
def imp_foo(x, y):
_taichi_skip_traceback = 2
return foo(x, y)
@functools.wraps(foo)
def rev_foo(x, y):
_taichi_skip_traceback = 2
return foo(y, x)
@functools.wraps(foo)
def wrapped(a, b):
_taichi_skip_traceback = 1
if is_taichi_class(a):
return a.element_wise_binary(imp_foo, b)
elif is_taichi_class(b):
return b.element_wise_binary(rev_foo, a)
else:
return imp_foo(a, b)
binary_ops.append(wrapped)
return wrapped
ternary_ops = []
def ternary(foo):
@functools.wraps(foo)
def abc_foo(a, b, c):
_taichi_skip_traceback = 2
return foo(a, b, c)
@functools.wraps(foo)
def bac_foo(b, a, c):
_taichi_skip_traceback = 2
return foo(a, b, c)
@functools.wraps(foo)
def cab_foo(c, a, b):
_taichi_skip_traceback = 2
return foo(a, b, c)
@functools.wraps(foo)
def wrapped(a, b, c):
_taichi_skip_traceback = 1
if is_taichi_class(a):
return a.element_wise_ternary(abc_foo, b, c)
elif is_taichi_class(b):
return b.element_wise_ternary(bac_foo, a, c)
elif is_taichi_class(c):
return c.element_wise_ternary(cab_foo, a, b)
else:
return abc_foo(a, b, c)
ternary_ops.append(wrapped)
return wrapped
writeback_binary_ops = []
def writeback_binary(foo):
@functools.wraps(foo)
def imp_foo(x, y):
_taichi_skip_traceback = 2
return foo(x, wrap_if_not_expr(y))
@functools.wraps(foo)
def wrapped(a, b):
_taichi_skip_traceback = 1
if is_taichi_class(a):
return a.element_wise_writeback_binary(imp_foo, b)
elif is_taichi_class(b):
raise TaichiSyntaxError(
f'cannot augassign taichi class {type(b)} to scalar expr')
else:
return imp_foo(a, b)
writeback_binary_ops.append(wrapped)
return wrapped
def cast(obj, dtype):
_taichi_skip_traceback = 1
dtype = cook_dtype(dtype)
if is_taichi_class(obj):
# TODO: unify with element_wise_unary
return obj.cast(dtype)
else:
return Expr(_ti_core.value_cast(Expr(obj).ptr, dtype))
def bit_cast(obj, dtype):
_taichi_skip_traceback = 1
dtype = cook_dtype(dtype)
if is_taichi_class(obj):
raise ValueError('Cannot apply bit_cast on Taichi classes')
else:
return Expr(_ti_core.bits_cast(Expr(obj).ptr, dtype))
def _unary_operation(taichi_op, python_op, a):
_taichi_skip_traceback = 1
if is_taichi_expr(a):
return Expr(taichi_op(a.ptr), tb=stack_info())
else:
return python_op(a)
def _binary_operation(taichi_op, python_op, a, b):
_taichi_skip_traceback = 1
if is_taichi_expr(a) or is_taichi_expr(b):
a, b = wrap_if_not_expr(a), wrap_if_not_expr(b)
return Expr(taichi_op(a.ptr, b.ptr), tb=stack_info())
else:
return python_op(a, b)
def _ternary_operation(taichi_op, python_op, a, b, c):
_taichi_skip_traceback = 1
if is_taichi_expr(a) or is_taichi_expr(b) or is_taichi_expr(c):
a, b, c = wrap_if_not_expr(a), wrap_if_not_expr(b), wrap_if_not_expr(c)
return Expr(taichi_op(a.ptr, b.ptr, c.ptr), tb=stack_info())
else:
return python_op(a, b, c)
@unary
def neg(a):
return _unary_operation(_ti_core.expr_neg, _bt_ops_mod.neg, a)
@unary
def sin(a):
return _unary_operation(_ti_core.expr_sin, math.sin, a)
@unary
def cos(a):
return _unary_operation(_ti_core.expr_cos, math.cos, a)
@unary
def asin(a):
return _unary_operation(_ti_core.expr_asin, math.asin, a)
@unary
def acos(a):
return _unary_operation(_ti_core.expr_acos, math.acos, a)
@unary
def sqrt(a):
return _unary_operation(_ti_core.expr_sqrt, math.sqrt, a)
@unary
def rsqrt(a):
def _rsqrt(a):
return 1 / math.sqrt(a)
return _unary_operation(_ti_core.expr_rsqrt, _rsqrt, a)
@unary
def floor(a):
return _unary_operation(_ti_core.expr_floor, math.floor, a)
@unary
def ceil(a):
return _unary_operation(_ti_core.expr_ceil, math.ceil, a)
@unary
def tan(a):
return _unary_operation(_ti_core.expr_tan, math.tan, a)
@unary
def tanh(a):
return _unary_operation(_ti_core.expr_tanh, math.tanh, a)
@unary
def exp(a):
return _unary_operation(_ti_core.expr_exp, math.exp, a)
@unary
def log(a):
return _unary_operation(_ti_core.expr_log, math.log, a)
@unary
def abs(a):
return _unary_operation(_ti_core.expr_abs, builtins.abs, a)
@unary
def bit_not(a):
return _unary_operation(_ti_core.expr_bit_not, _bt_ops_mod.invert, a)
@unary
def logical_not(a):
return _unary_operation(_ti_core.expr_logic_not, lambda x: int(not x), a)
def random(dtype=float):
dtype = cook_dtype(dtype)
x = Expr(_ti_core.make_rand_expr(dtype))
return impl.expr_init(x)
# NEXT: add matpow(self, power)
@binary
def add(a, b):
return _binary_operation(_ti_core.expr_add, _bt_ops_mod.add, a, b)
@binary
def sub(a, b):
return _binary_operation(_ti_core.expr_sub, _bt_ops_mod.sub, a, b)
@binary
def mul(a, b):
return _binary_operation(_ti_core.expr_mul, _bt_ops_mod.mul, a, b)
@binary
def mod(a, b):
def expr_python_mod(a, b):
        # a % b = a - (a // b) * b
quotient = Expr(_ti_core.expr_floordiv(a, b))
multiply = Expr(_ti_core.expr_mul(b, quotient.ptr))
return _ti_core.expr_sub(a, multiply.ptr)
return _binary_operation(expr_python_mod, _bt_ops_mod.mod, a, b)
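# Worked example of the floored-modulo identity used in mod() above (plain
# Python arithmetic, independent of Taichi): for a = -7, b = 3,
#   a // b = -3, so a - (a // b) * b = -7 - (-9) = 2, matching -7 % 3 == 2.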
@binary
def pow(a, b):
return _binary_operation(_ti_core.expr_pow, _bt_ops_mod.pow, a, b)
@binary
def floordiv(a, b):
return _binary_operation(_ti_core.expr_floordiv, _bt_ops_mod.floordiv, a,
b)
@binary
def truediv(a, b):
return _binary_operation(_ti_core.expr_truediv, _bt_ops_mod.truediv, a, b)
@binary
def max(a, b):
return _binary_operation(_ti_core.expr_max, builtins.max, a, b)
@binary
def min(a, b):
return _binary_operation(_ti_core.expr_min, builtins.min, a, b)
@binary
def atan2(a, b):
return _binary_operation(_ti_core.expr_atan2, math.atan2, a, b)
@binary
def raw_div(a, b):
def c_div(a, b):
if isinstance(a, int) and isinstance(b, int):
return a // b
else:
return a / b
return _binary_operation(_ti_core.expr_div, c_div, a, b)
@binary
def raw_mod(a, b):
def c_mod(a, b):
return a - b * int(float(a) / b)
return _binary_operation(_ti_core.expr_mod, c_mod, a, b)
@binary
def cmp_lt(a, b):
return _binary_operation(_ti_core.expr_cmp_lt, lambda a, b: -int(a < b), a,
b)
@binary
def cmp_le(a, b):
return _binary_operation(_ti_core.expr_cmp_le, lambda a, b: -int(a <= b),
a, b)
@binary
def cmp_gt(a, b):
return _binary_operation(_ti_core.expr_cmp_gt, lambda a, b: -int(a > b), a,
b)
@binary
def cmp_ge(a, b):
return _binary_operation(_ti_core.expr_cmp_ge, lambda a, b: -int(a >= b),
a, b)
@binary
def cmp_eq(a, b):
return _binary_operation(_ti_core.expr_cmp_eq, lambda a, b: -int(a == b),
a, b)
@binary
def cmp_ne(a, b):
return _binary_operation(_ti_core.expr_cmp_ne, lambda a, b: -int(a != b),
a, b)
@binary
def bit_or(a, b):
return _binary_operation(_ti_core.expr_bit_or, _bt_ops_mod.or_, a, b)
@binary
def bit_and(a, b):
return _binary_operation(_ti_core.expr_bit_and, _bt_ops_mod.and_, a, b)
@binary
def bit_xor(a, b):
return _binary_operation(_ti_core.expr_bit_xor, _bt_ops_mod.xor, a, b)
@binary
def bit_shl(a, b):
return _binary_operation(_ti_core.expr_bit_shl, _bt_ops_mod.lshift, a, b)
@binary
def bit_sar(a, b):
return _binary_operation(_ti_core.expr_bit_sar, _bt_ops_mod.rshift, a, b)
@taichi_scope
@binary
def bit_shr(a, b):
return _binary_operation(_ti_core.expr_bit_shr, _bt_ops_mod.rshift, a, b)
# We don't have logic_and/or instructions yet:
logical_or = bit_or
logical_and = bit_and
@ternary
def select(cond, a, b):
# TODO: systematically resolve `-1 = True` problem by introducing u1:
cond = logical_not(logical_not(cond))
def py_select(cond, a, b):
return a * cond + b * (1 - cond)
return _ternary_operation(_ti_core.expr_select, py_select, cond, a, b)
@writeback_binary
def atomic_add(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_add(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def atomic_sub(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_sub(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def atomic_min(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_min(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def atomic_max(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_max(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def atomic_and(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_bit_and(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def atomic_or(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_bit_or(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def atomic_xor(a, b):
return impl.expr_init(
Expr(_ti_core.expr_atomic_bit_xor(a.ptr, b.ptr), tb=stack_info()))
@writeback_binary
def assign(a, b):
_ti_core.expr_assign(a.ptr, b.ptr, stack_info())
return a
def ti_max(*args):
num_args = len(args)
assert num_args >= 1
if num_args == 1:
return args[0]
elif num_args == 2:
return max(args[0], args[1])
else:
return max(args[0], ti_max(*args[1:]))
def ti_min(*args):
num_args = len(args)
assert num_args >= 1
if num_args == 1:
return args[0]
elif num_args == 2:
return min(args[0], args[1])
else:
return min(args[0], ti_min(*args[1:]))
def ti_any(a):
return a.any()
def ti_all(a):
return a.all()
def append(l, indices, val):
a = impl.expr_init(
_ti_core.insert_append(l.snode.ptr, make_expr_group(indices),
Expr(val).ptr))
return a
def external_func_call(func, args=[], outputs=[]):
func_addr = ctypes.cast(func, ctypes.c_void_p).value
_ti_core.insert_external_func_call(func_addr, '', make_expr_group(args),
make_expr_group(outputs))
def asm(source, inputs=[], outputs=[]):
_ti_core.insert_external_func_call(0, source, make_expr_group(inputs),
make_expr_group(outputs))
def is_active(l, indices):
return Expr(
_ti_core.insert_is_active(l.snode.ptr, make_expr_group(indices)))
def activate(l, indices):
_ti_core.insert_activate(l.snode.ptr, make_expr_group(indices))
def deactivate(l, indices):
_ti_core.insert_deactivate(l.snode.ptr, make_expr_group(indices))
def length(l, indices):
return Expr(_ti_core.insert_len(l.snode.ptr, make_expr_group(indices)))
def rescale_index(a, b, I):
"""Rescales the index 'I' of field 'a' the match the shape of field 'b'
Parameters
----------
a: ti.field(), ti.Vector.field, ti.Matrix.field()
input taichi field
b: ti.field(), ti.Vector.field, ti.Matrix.field()
output taichi field
I: ti.Vector()
grouped loop index
Returns
-------
Ib: ti.Vector()
rescaled grouped loop index
"""
assert isinstance(a, Expr) and a.is_global(), \
f"first arguement must be a field"
assert isinstance(b, Expr) and b.is_global(), \
f"second arguement must be a field"
assert isinstance(I, matrix.Matrix) and not I.is_global(), \
f"third arguement must be a grouped index"
Ib = I.copy()
for n in range(min(I.n, min(len(a.shape), len(b.shape)))):
if a.shape[n] > b.shape[n]:
Ib.entries[n] = I.entries[n] // (a.shape[n] // b.shape[n])
if a.shape[n] < b.shape[n]:
Ib.entries[n] = I.entries[n] * (b.shape[n] // a.shape[n])
return Ib
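# Illustrative use of rescale_index inside a kernel (the field and kernel names
# are hypothetical; it assumes `fine` and `coarse` are fields whose shapes
# differ by an integer factor):
#   @ti.kernel
#   def restrict():
#       for I in ti.grouped(fine):
#           fine[I] = coarse[ti.rescale_index(fine, coarse, I)]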
def get_addr(f, indices):
"""Returns the address (for CUDA/x64) for index `indices` of field `f`. Currently, this function can only be called inside a kernel.
Args:
f (ti.field(), ti.Vector.field, ti.Matrix.field()): input taichi field
indices (int, ti.Vector()): field indices
Returns:
ti.u64: The address of `f[indices]`.
"""
return Expr(_ti_core.expr_get_addr(f.snode.ptr, make_expr_group(indices)))
| mit |
skulbrane/metagoofil | unzip.py | 25 | 3848 | """ unzip.py
Version: 1.1
Extract a zipfile to the directory provided
It first creates the directory structure to house the files
then it extracts the files to it.
Sample usage:
command line
unzip.py -p 10 -z c:\testfile.zip -o c:\testoutput
python class
import unzip
un = unzip.unzip()
un.extract(r'c:\testfile.zip', 'c:\testoutput')
By Doug Tolton
"""
import sys
import zipfile
import os
import os.path
import getopt
class unzip:
def __init__(self, verbose = False, percent = 10):
        self.verbose = verbose
self.percent = percent
def extract(self, file, dir):
if not dir.endswith(':') and not os.path.exists(dir):
os.mkdir(dir)
zf = zipfile.ZipFile(file)
# create directory structure to house files
self._createstructure(file, dir)
num_files = len(zf.namelist())
percent = self.percent
divisions = 100 / percent
perc = int(num_files / divisions)
# extract files to directory structure
for i, name in enumerate(zf.namelist()):
if self.verbose == True:
print "Extracting %s" % name
elif perc > 0 and (i % perc) == 0 and i > 0:
complete = int (i / perc) * percent
#print "%s%% complete" % complete
if not name.endswith('/'):
outfile = open(os.path.join(dir, name), 'wb')
outfile.write(zf.read(name))
outfile.flush()
outfile.close()
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)
def _makedirs(self, directories, basedir):
""" Create any directories that don't currently exist """
for dir in directories:
curdir = os.path.join(basedir, dir)
if not os.path.exists(curdir):
os.mkdir(curdir)
#print("dir-->"+str(curdir))
def _listdirs(self, file):
""" Grabs all the directories in the zip structure
This is necessary to create the structure before trying
to extract the file to it. """
zf = zipfile.ZipFile(file)
dirs = []
#print str(zf.namelist())
for name in zf.namelist():
dirsname = name.split("/")
ant=""
for dirname in dirsname[:-1]:
dirs.append(ant+dirname)
#print "anadiendo:"+(ant+dirname)
ant=ant+dirname+"/"
dirs.sort()
return dirs
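    # Illustrative outcome of _listdirs (hypothetical archive contents):
    #   an archive holding "docs/img/logo.png" produces ['docs', 'docs/img'],
    #   so _makedirs() can create both levels before any file is extracted.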
def usage():
print """usage: unzip.py -z <zipfile> -o <targetdir>
<zipfile> is the source zipfile to extract
<targetdir> is the target destination
-z zipfile to extract
-o target location
-p sets the percentage notification
-v sets the extraction to verbose (overrides -p)
long options also work:
--verbose
--percent=10
--zipfile=<zipfile>
--outdir=<targetdir>"""
def main():
shortargs = 'vhp:z:o:'
longargs = ['verbose', 'help', 'percent=', 'zipfile=', 'outdir=']
unzipper = unzip()
try:
opts, args = getopt.getopt(sys.argv[1:], shortargs, longargs)
except getopt.GetoptError:
usage()
sys.exit(2)
zipsource = ""
zipdest = ""
for o, a in opts:
if o in ("-v", "--verbose"):
unzipper.verbose = True
if o in ("-p", "--percent"):
if not unzipper.verbose == True:
unzipper.percent = int(a)
if o in ("-z", "--zipfile"):
zipsource = a
if o in ("-o", "--outdir"):
zipdest = a
if o in ("-h", "--help"):
usage()
sys.exit()
if zipsource == "" or zipdest == "":
usage()
sys.exit()
unzipper.extract(zipsource, zipdest)
if __name__ == '__main__': main() | gpl-2.0 |
abravalheri/pypiple | pypiple/index.py | 1 | 8399 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pypiple index
-------------
Domain logic behing pypiple.
The class ``pypiple.index.Index`` is used to build a logic package index.
This index contains meta-information about all packages inside a given
directory, like file paht, author, homepage, etc and provides fast lookup
search methods. The index is also groups different versions of the same
package and is able to recognize if the cached metadata is uptodate with
the underlaying file system.
.. _PKG_FIELDS:
.. data:: PKG_FIELDS
list of metadata to be retrieved from package
.. data:: PKG_DECODERS
mechanism used to extract package information, according to extension
"""
import logging
from glob import glob
from itertools import groupby
from operator import add, itemgetter
from os.path import basename, getmtime, join, splitext
from time import time
import pkginfo
from property_manager import PropertyManager, cached_property
from six.moves import reduce # noqa, pylint: disable=redefined-builtin
from pypiple import __version__ # noqa
__author__ = 'Anderson Bravalheri'
__copyright__ = 'Anderson Bravalheri'
__license__ = 'Mozilla Public License Version 2.0'
LOGGER = logging.getLogger(__name__)
PKG_FIELDS = (
    'name', 'version', 'summary', 'home_page', 'description', 'keywords',
'platform', 'classifiers', 'download_url', 'author', 'author_email',
'maintainer', 'maintainer_email',
)
PKG_DECODERS = {
'whl': pkginfo.Wheel,
'egg': pkginfo.BDist,
'tar.gz': pkginfo.SDist,
}
def filter_info(info):
"""Select most relevant information about package.
Arguments:
info (object): object with all attributes defined in PKG_FIELDS_.
Returns:
A dict with all keys in defined PKG_FIELDS_.
"""
filtered = {field: getattr(info, field) for field in PKG_FIELDS}
if not info.maintainer:
filtered['maintainer'] = info.author
filtered['maintainer_email'] = info.author_email
return filtered
def retrieve_data(path):
"""Retrieve metadata about a python package.
Arguments:
path (string): path to the package
Returns:
A dict with all keys defined in PKG_FIELDS_.
"""
try:
        # splitext() keeps the leading dot and cannot split '.tar.gz', so match
        # the path against the known suffixes instead of indexing by raw extension
        ext = next(e for e in PKG_DECODERS if path.endswith('.' + e))
        info = PKG_DECODERS[ext](path)
data = filter_info(info)
data['mtime'] = getmtime(path)
return data
except (RuntimeError, ValueError):
LOGGER.error('Unnable to read information about %s', basename(path))
def extract_version(pkg):
"""Produce a comparable object from package version string.
This functions assumes the package uses Semantic Versioning conventions.
See `<http://semver.org>`_
Arguments:
pkg: ``dict``-like object containing package metadata.
Required key: ``version``.
Returns:
tuple: components of a semantic version
"""
relevant = pkg['version'].split('+')[0] # ignore build info
components = relevant.split('-')
main = components[0]
alias = components[1] if len(components) > 1 else '' # e.g.: alpha, beta
return tuple(main.split('.') + [alias])
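# Illustrative behaviour of extract_version (hypothetical metadata dicts):
#   extract_version({'version': '1.2.3-beta+build.5'}) -> ('1', '2', '3', 'beta')
#   extract_version({'version': '1.10.0'})             -> ('1', '10', '0', '')
# Note the components stay strings, so comparisons are lexicographic.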
class Index(PropertyManager):
"""Index of python packages inside a given directory path.
    This class assumes all packages are stored in a single directory.
The ``update`` method is used to sync the in-memory index with the
current state of the storage directory.
.. _support:
Note:
The following package formats are supported:
``*.whl`` - binary packages created with ``setup.py bdist_wheel``
``*.egg`` - binary packages created with ``setup.py bdist_egg``
``*.tar.gz`` - source packages created with ``setup.py sdist``
Package format is deduced from file extension.
"""
def __init__(self, path):
"""Cache-enabled index generator instance.
After created the index is empty. In order to synchronize its contents
with the underlaying directory, please use the method ``update``.
Arguments:
path (str): path to the directory used to store packages
"""
super(Index, self).__init__()
self.path = path
self._mtime = None # => last index update
self._metadata = {} # => primary source of true
def uptodate(self):
"""Discover if the index cache is uptodate.
Returns:
True if no change in index directory since the last update
"""
mtime = self.mtime()
return mtime and mtime >= getmtime(self.path)
def mtime(self, pkg=None):
"""Retrieve the time instant when the index where updated.
Keyword Arguments:
pkg (string): path to a package. When given, this method will
return the mtime of the file, read during the last update.
Default is None.
Returns:
Time instant for the last update in index, or the cached mtime
value for a specified package.
"""
if pkg:
return self.metadata[pkg]['mtime'] # pylint: disable=unsubscriptable-object
return self._mtime
def scan(self):
"""Scan the index directory searching for python packages.
See support_.
Returns:
List of paths for package files inside index directory.
"""
types = PKG_DECODERS.keys()
pkgs = [glob(join(self.path, '*.{}'.format(ext))) for ext in types]
return reduce(add, pkgs) # merge arrays
def diff(self, pkgs):
"""Compute the difference between index cache and the given list
of paths for packages.
Arguments:
        pkgs (List[str]): List of paths pointing to python packages
Returns:
Tuple with 3 elements.
The first element is a list of packages present in the given list
but absent in the index cache.
The second element is a list of packages present in both, but
have been modified.
The last element is a list of packages absent in the given list,
but present in the index cache.
"""
cached = set(self.files)
current = set(pkgs)
added = current - cached
removed = cached - current
suspects = current & cached # intersection
dirty = {pkg for pkg in suspects if getmtime(pkg) > self.mtime(pkg)}
return (added, dirty, removed)
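    # Example of what diff() reports (hypothetical file names): with a cache of
    # {a-1.0.whl, b-1.0.whl} and a scan returning {a-1.0.whl, c-1.0.whl},
    # added = {c-1.0.whl}, removed = {b-1.0.whl}, and a-1.0.whl lands in the
    # "dirty" set only if its mtime on disk is newer than the cached one.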
def update(self):
"""Update index cache based on the current state of the directory.
Returns:
Tuple with 2 elements.
The first element is a list of packages modified since
the last update.
The second element is a list of packages removed since
the last update.
"""
if self.uptodate():
return None
current = self.scan()
(added, dirty, removed) = self.diff(current)
for path in removed:
del self._metadata[path]
        modified = added | dirty  # union of sets
self._metadata.update(
{path: retrieve_data(path) for path in modified})
# retrieve_data will return None if pkg decoding fails,
# therefore, it's necessary to check null values
# Expire cache: be lazy and regenerate it on demand
self.clear_cached_properties()
# Store 'last-updated' info
self._mtime = time()
return (modified, removed)
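    # Sketch of the intended polling pattern (the directory path is made up):
    #   index = Index('/srv/packages')
    #   changes = index.update()
    #   if changes is not None:
    #       modified, removed = changes
    #       # ...regenerate listings for the affected packages...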
@cached_property
def files(self):
"""List of indexed files
Lazy generated list containing all the files inside index
directory whose type is supported.
See support_.
"""
return self.metadata.keys() # pylint: disable=no-member
@cached_property
def metadata(self):
"""List of metadata about packages
Lazy generated list containing all the metadata about indexed packages.
"""
return self._metadata
@cached_property
def packages(self):
"""List of packages
Lazy generated dictionary containing all different versions
for each package, indexed by its name.
"""
cache = self.metadata.values() # pylint: disable=no-member
return {
name: sorted(infos, key=extract_version, reverse=True)
for name, infos in groupby(cache, key=itemgetter('name'))
}
| mpl-2.0 |
sqlalchemy/sqlalchemy | lib/sqlalchemy/types.py | 3 | 2883 | # types.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Compatibility namespace for sqlalchemy.sql.types.
"""
__all__ = [
"TypeEngine",
"TypeDecorator",
"UserDefinedType",
"INT",
"CHAR",
"VARCHAR",
"NCHAR",
"NVARCHAR",
"TEXT",
"Text",
"FLOAT",
"NUMERIC",
"REAL",
"DECIMAL",
"TIMESTAMP",
"DATETIME",
"CLOB",
"BLOB",
"BINARY",
"VARBINARY",
"BOOLEAN",
"BIGINT",
"SMALLINT",
"INTEGER",
"DATE",
"TIME",
"String",
"Integer",
"SmallInteger",
"BigInteger",
"Numeric",
"Float",
"DateTime",
"Date",
"Time",
"LargeBinary",
"Boolean",
"Unicode",
"Concatenable",
"UnicodeText",
"PickleType",
"Interval",
"Enum",
"Indexable",
"ARRAY",
"JSON",
]
from .sql.sqltypes import _Binary
from .sql.sqltypes import ARRAY
from .sql.sqltypes import BIGINT
from .sql.sqltypes import BigInteger
from .sql.sqltypes import BINARY
from .sql.sqltypes import BLOB
from .sql.sqltypes import BOOLEAN
from .sql.sqltypes import Boolean
from .sql.sqltypes import CHAR
from .sql.sqltypes import CLOB
from .sql.sqltypes import Concatenable
from .sql.sqltypes import DATE
from .sql.sqltypes import Date
from .sql.sqltypes import DATETIME
from .sql.sqltypes import DateTime
from .sql.sqltypes import DECIMAL
from .sql.sqltypes import Enum
from .sql.sqltypes import FLOAT
from .sql.sqltypes import Float
from .sql.sqltypes import Indexable
from .sql.sqltypes import INT
from .sql.sqltypes import INTEGER
from .sql.sqltypes import Integer
from .sql.sqltypes import Interval
from .sql.sqltypes import JSON
from .sql.sqltypes import LargeBinary
from .sql.sqltypes import MatchType
from .sql.sqltypes import NCHAR
from .sql.sqltypes import NULLTYPE
from .sql.sqltypes import NullType
from .sql.sqltypes import NUMERIC
from .sql.sqltypes import Numeric
from .sql.sqltypes import NVARCHAR
from .sql.sqltypes import PickleType
from .sql.sqltypes import REAL
from .sql.sqltypes import SchemaType
from .sql.sqltypes import SMALLINT
from .sql.sqltypes import SmallInteger
from .sql.sqltypes import String
from .sql.sqltypes import STRINGTYPE
from .sql.sqltypes import TEXT
from .sql.sqltypes import Text
from .sql.sqltypes import TIME
from .sql.sqltypes import Time
from .sql.sqltypes import TIMESTAMP
from .sql.sqltypes import Unicode
from .sql.sqltypes import UnicodeText
from .sql.sqltypes import VARBINARY
from .sql.sqltypes import VARCHAR
from .sql.type_api import adapt_type
from .sql.type_api import to_instance
from .sql.type_api import TypeDecorator
from .sql.type_api import TypeEngine
from .sql.type_api import UserDefinedType
from .sql.type_api import Variant
| mit |
Ant-OS/android_packages_apps_OTAUpdates | jni/boost_1_57_0/tools/build/test/sort_rule.py | 51 | 2493 | #!/usr/bin/python
# Copyright (C) 2008. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests for the Boost Jam builtin SORT rule.
import BoostBuild
###############################################################################
#
# testSORTCorrectness()
# ---------------------
#
###############################################################################
def testSORTCorrectness():
"""Testing that Boost Jam's SORT builtin rule actually sorts correctly."""
t = BoostBuild.Tester(["-ftest.jam", "-d1"], pass_toolset=False,
use_test_config=False)
t.write("test.jam", """\
NOCARE all ;
source-data = 1 8 9 2 7 3 4 7 1 27 27 9 98 98 1 1 4 5 6 2 3 4 8 1 -2 -2 0 0 0 ;
target-data = -2 -2 0 0 0 1 1 1 1 1 2 2 27 27 3 3 4 4 4 5 6 7 7 8 8 9 9 98 98 ;
ECHO "starting up" ;
sorted-data = [ SORT $(source-data) ] ;
ECHO "done" ;
if $(sorted-data) != $(target-data)
{
ECHO "Source :" $(source-data) ;
ECHO "Expected :" $(target-data) ;
ECHO "SORT returned:" $(sorted-data) ;
EXIT "SORT error" : -2 ;
}
""")
t.run_build_system()
t.expect_output_lines("starting up")
t.expect_output_lines("done")
t.expect_output_lines("SORT error", False)
t.cleanup()
###############################################################################
#
# testSORTDuration()
# ------------------
#
###############################################################################
def testSORTDuration():
"""
Regression test making sure Boost Jam's SORT builtin rule does not get
quadratic behaviour again in this use case.
"""
t = BoostBuild.Tester(["-ftest.jam", "-d1"], pass_toolset=False,
use_test_config=False)
f = open(t.workpath("test.jam"), "w")
print >> f, "data = "
for i in range(0, 20000):
if i % 2:
print >> f, '"aaa"'
else:
print >> f, '"bbb"'
print >> f, """;
ECHO "starting up" ;
sorted = [ SORT $(data) ] ;
ECHO "done" ;
NOCARE all ;
"""
f.close()
t.run_build_system(expected_duration=1)
t.expect_output_lines("starting up")
t.expect_output_lines("done")
t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
testSORTCorrectness()
testSORTDuration()
| apache-2.0 |
travisfcollins/gnuradio | gr-vocoder/examples/g723_40_audio_loopback.py | 58 | 1477 | #!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
def build_graph():
tb = gr.top_block()
src = audio.source(8000)
src_scale = blocks.multiply_const_ff(32767)
f2s = blocks.float_to_short()
enc = vocoder.g723_40_encode_sb()
dec = vocoder.g723_40_decode_bs()
s2f = blocks.short_to_float()
sink_scale = blocks.multiply_const_ff(1.0/32767.)
sink = audio.sink(8000)
tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
return tb
if __name__ == '__main__':
tb = build_graph()
tb.start()
raw_input ('Press Enter to exit: ')
tb.stop()
tb.wait()
| gpl-3.0 |
frnhr/django-cms | cms/south_migrations/0004_textobjects.py | 1680 | 20032 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Dummy migration
pass
def backwards(self, orm):
# Dummy migration
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
KylinMod/android_kernel_motorola_msm8960-common | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
ashhher3/invenio | modules/websession/lib/webuser.py | 6 | 58171 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This file implements all methods necessary for working with users and
sessions in Invenio. It contains methods for login/registration
when a user logs in or registers with the system, and for checking
whether a user is a guest or not.
At the same time it provides everything needed for session
management, working with websession.
It also contains Apache-related user authentication stuff.
"""
__revision__ = "$Id$"
import cgi
import urllib
import urlparse
import socket
import smtplib
import re
import random
import datetime
from socket import gaierror
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_ACCESS_CONTROL_LEVEL_GUESTS, \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN, \
CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS, \
CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT, \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_URL, \
CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS, \
CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS, \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_BIBAUTHORID_ENABLED, \
CFG_SITE_RECORD
try:
from invenio.session import get_session
except ImportError:
pass
from invenio.dbquery import run_sql, OperationalError, \
serialize_via_marshal, deserialize_via_marshal
from invenio.access_control_admin import acc_get_role_id, acc_get_action_roles, acc_get_action_id, acc_is_user_in_role, acc_find_possible_activities
from invenio.access_control_mailcookie import mail_cookie_create_mail_activation
from invenio.access_control_firerole import acc_firerole_check_user, load_role_definition
from invenio.access_control_config import SUPERADMINROLE, CFG_EXTERNAL_AUTH_USING_SSO
from invenio.messages import gettext_set_language, wash_languages, wash_language
from invenio.mailutils import send_email
from invenio.errorlib import register_exception
from invenio.webgroup_dblayer import get_groups
from invenio.external_authentication import InvenioWebAccessExternalAuthError
from invenio.access_control_config import CFG_EXTERNAL_AUTHENTICATION, \
CFG_WEBACCESS_MSGS, CFG_WEBACCESS_WARNING_MSGS, CFG_EXTERNAL_AUTH_DEFAULT, \
CFG_TEMP_EMAIL_ADDRESS
from invenio.webuser_config import CFG_WEBUSER_USER_TABLES
import invenio.template
tmpl = invenio.template.load('websession')
re_invalid_nickname = re.compile(""".*[,'@]+.*""")
# pylint: disable=C0301
def createGuestUser():
"""Create a guest user , insert into user null values in all fields
createGuestUser() -> GuestUserID
"""
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0:
try:
return run_sql("insert into user (email, note) values ('', '1')")
except OperationalError:
return None
else:
try:
return run_sql("insert into user (email, note) values ('', '0')")
except OperationalError:
return None
def page_not_authorized(req, referer='', uid='', text='', navtrail='', ln=CFG_SITE_LANG,
navmenuid=""):
"""Show error message when user is not authorized to do something.
@param referer: in case the displayed message propose a login link, this
is the url to return to after logging in. If not specified it is guessed
from req.
@param uid: the uid of the user. If not specified it is guessed from req.
@param text: the message to be displayed. If not specified it will be
guessed from the context.
"""
from invenio.webpage import page
_ = gettext_set_language(ln)
if not referer:
referer = req.unparsed_uri
if not CFG_ACCESS_CONTROL_LEVEL_SITE:
title = CFG_WEBACCESS_MSGS[5]
if not uid:
uid = getUid(req)
try:
res = run_sql("SELECT email FROM user WHERE id=%s AND note=1", (uid,))
if res and res[0][0]:
if text:
body = text
else:
body = "%s %s" % (CFG_WEBACCESS_WARNING_MSGS[9] % cgi.escape(res[0][0]),
("%s %s" % (CFG_WEBACCESS_MSGS[0] % urllib.quote(referer), CFG_WEBACCESS_MSGS[1])))
else:
if text:
body = text
else:
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 1:
body = CFG_WEBACCESS_MSGS[3]
else:
body = CFG_WEBACCESS_WARNING_MSGS[4] + CFG_WEBACCESS_MSGS[2]
except OperationalError, e:
body = _("Database problem") + ': ' + str(e)
elif CFG_ACCESS_CONTROL_LEVEL_SITE == 1:
title = CFG_WEBACCESS_MSGS[8]
body = "%s %s" % (CFG_WEBACCESS_MSGS[7], CFG_WEBACCESS_MSGS[2])
elif CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
title = CFG_WEBACCESS_MSGS[6]
body = "%s %s" % (CFG_WEBACCESS_MSGS[4], CFG_WEBACCESS_MSGS[2])
return page(title=title,
language=ln,
uid=getUid(req),
body=body,
navtrail=navtrail,
req=req,
navmenuid=navmenuid)
def getUid(req):
"""Return user ID taking it from the cookie of the request.
Includes control mechanism for the guest users, inserting in
the database table when need be, raising the cookie back to the
client.
User ID is set to 0 when client refuses cookie or we are in the
read-only site operation mode.
User ID is set to -1 when we are in the permission denied site
operation mode.
getUid(req) -> userId
"""
if hasattr(req, '_user_info'):
return req._user_info['uid']
if CFG_ACCESS_CONTROL_LEVEL_SITE == 1: return 0
if CFG_ACCESS_CONTROL_LEVEL_SITE == 2: return -1
guest = 0
try:
session = get_session(req)
except Exception:
## Not possible to obtain a session
return 0
uid = session.get('uid', -1)
if not session.need_https:
if uid == -1: # first time, so create a guest user
if CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
uid = session['uid'] = createGuestUser()
session.set_remember_me(False)
guest = 1
else:
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0:
session['uid'] = 0
session.set_remember_me(False)
return 0
else:
return -1
else:
if not hasattr(req, '_user_info') and 'user_info' in session:
req._user_info = session['user_info']
req._user_info = collect_user_info(req, refresh=True)
if guest == 0:
guest = isGuestUser(uid)
if guest:
if CFG_ACCESS_CONTROL_LEVEL_GUESTS == 0:
return uid
elif CFG_ACCESS_CONTROL_LEVEL_GUESTS >= 1:
return -1
else:
res = run_sql("SELECT note FROM user WHERE id=%s", (uid,))
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 0:
return uid
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1 and res and res[0][0] in [1, "1"]:
return uid
else:
return -1
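# Illustrative caller sketch for getUid() (kept as a comment, not part of the
# original module); it simply mirrors the sentinel values described in the
# docstring above:
#   uid = getUid(req)
#   if uid == -1:
#       return page_not_authorized(req)  # permission-denied site mode or disallowed account
#   elif uid == 0:
#       pass  # read-only site mode, or guest without a database uid
#   else:
#       pass  # database uid of the (guest or logged-in) user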
def setUid(req, uid, remember_me=False):
"""It sets the userId into the session, and raise the cookie to the client.
"""
if hasattr(req, '_user_info'):
del req._user_info
session = get_session(req)
try:
guest_personinfo = session['personinfo']
except KeyError:
guest_personinfo = dict()
session.invalidate()
session = get_session(req)
# a part of the session before the user logged in (browsing as guest)
# is copied to the new session
session['guest_personinfo'] = guest_personinfo
session['uid'] = uid
if remember_me:
session.set_timeout(86400)
session.set_remember_me(remember_me)
if uid > 0:
user_info = collect_user_info(req, login_time=True)
session['user_info'] = user_info
req._user_info = user_info
session.save()
else:
del session['user_info']
return uid
def session_param_del(req, key):
"""
Remove a given key from the session.
"""
session = get_session(req)
del session[key]
def session_param_set(req, key, value):
"""
Set a VALUE for the session param KEY for the current session.
"""
session = get_session(req)
session[key] = value
def session_param_get(req, key, default = None):
"""
Return session parameter value associated with session parameter KEY for the current session.
    If the key doesn't exist, return the provided default.
"""
session = get_session(req)
return session.get(key, default)
def session_param_list(req):
"""
List all available session parameters.
"""
session = get_session(req)
return session.keys()
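# Illustrative usage of the session_param_* helpers above (comment only; the
# key name 'referer' is an arbitrary example):
#   session_param_set(req, 'referer', '/search')    # store a value in the session
#   session_param_get(req, 'referer', default='')   # read it back, with a default
#   'referer' in session_param_list(req)             # enumerate stored keys
#   session_param_del(req, 'referer')                # remove it again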
def get_last_login(uid):
"""Return the last_login datetime for uid if any, otherwise return the Epoch."""
res = run_sql('SELECT last_login FROM user WHERE id=%s', (uid,), 1)
if res and res[0][0]:
return res[0][0]
else:
return datetime.datetime(1970, 1, 1)
def get_user_info(uid, ln=CFG_SITE_LANG):
"""Get infos for a given user.
@param uid: user id (int)
@return: tuple: (uid, nickname, display_name)
"""
_ = gettext_set_language(ln)
query = """SELECT id, nickname
FROM user
WHERE id=%s"""
res = run_sql(query, (uid,))
if res:
if res[0]:
user = list(res[0])
if user[1]:
user.append(user[1])
else:
user[1] = str(user[0])
user.append(_("user") + ' #' + str(user[0]))
return tuple(user)
return (uid, '', _("N/A"))
def get_uid_from_email(email):
"""Return the uid corresponding to an email.
    Return -1 when the email does not exist."""
try:
res = run_sql("SELECT id FROM user WHERE email=%s", (email,))
if res:
return res[0][0]
else:
return -1
except OperationalError:
register_exception()
return -1
def isGuestUser(uid, run_on_slave=True):
"""It Checks if the userId corresponds to a guestUser or not
isGuestUser(uid) -> boolean
"""
out = 1
try:
res = run_sql("SELECT email FROM user WHERE id=%s LIMIT 1", (uid,), 1,
run_on_slave=run_on_slave)
if res:
if res[0][0]:
out = 0
except OperationalError:
register_exception()
return out
def isUserSubmitter(user_info):
"""Return True if the user is a submitter for something; False otherwise."""
u_email = get_email(user_info['uid'])
res = run_sql("SELECT email FROM sbmSUBMISSIONS WHERE email=%s LIMIT 1", (u_email,), 1)
return len(res) > 0
def isUserReferee(user_info):
"""Return True if the user is a referee for something; False otherwise."""
if CFG_CERN_SITE:
return True
else:
for (role_id, role_name, role_description) in acc_get_action_roles(acc_get_action_id('referee')):
if acc_is_user_in_role(user_info, role_id):
return True
return False
def isUserAdmin(user_info):
"""Return True if the user has some admin rights; False otherwise."""
return acc_find_possible_activities(user_info) != {}
def isUserSuperAdmin(user_info):
"""Return True if the user is superadmin; False otherwise."""
if run_sql("""SELECT r.id
FROM accROLE r LEFT JOIN user_accROLE ur
ON r.id = ur.id_accROLE
WHERE r.name = %s AND
ur.id_user = %s AND ur.expiration>=NOW() LIMIT 1""", (SUPERADMINROLE, user_info['uid']), 1, run_on_slave=True):
return True
return acc_firerole_check_user(user_info, load_role_definition(acc_get_role_id(SUPERADMINROLE)))
def nickname_valid_p(nickname):
"""Check whether wanted NICKNAME supplied by the user is valid.
At the moment we just check whether it is not empty, does not
contain blanks or @, is not equal to `guest', etc.
This check relies on re_invalid_nickname regexp (see above)
Return 1 if nickname is okay, return 0 if it is not.
"""
if nickname and \
not(nickname.startswith(' ') or nickname.endswith(' ')) and \
nickname.lower() != 'guest':
if not re_invalid_nickname.match(nickname):
return 1
return 0
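# Illustrative results of nickname_valid_p() (comment only, sample values):
#   nickname_valid_p('johndoe')   -> 1   (plain nickname, accepted)
#   nickname_valid_p('guest')     -> 0   (reserved name)
#   nickname_valid_p(' johndoe')  -> 0   (leading blank)
#   nickname_valid_p('john@doe')  -> 0   (matches re_invalid_nickname)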
def email_valid_p(email):
"""Check whether wanted EMAIL address supplied by the user is valid.
At the moment we just check whether it contains '@' and whether
it doesn't contain blanks. We also check the email domain if
CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN is set.
Return 1 if email is okay, return 0 if it is not.
"""
if (email.find("@") <= 0) or (email.find(" ") > 0):
return 0
elif CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN:
if not email.endswith(CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN):
return 0
return 1
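# Illustrative results of email_valid_p() (comment only, sample addresses,
# assuming CFG_ACCESS_CONTROL_LIMIT_REGISTRATION_TO_DOMAIN is not set):
#   email_valid_p('user@example.org')   -> 1   (contains '@', no blanks)
#   email_valid_p('userexample.org')    -> 0   (missing '@')
#   email_valid_p('user @example.org')  -> 0   (contains a blank)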
def confirm_email(email):
"""Confirm the email. It returns None when there are problems, otherwise
    it returns the uid involved."""
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 0:
activated = 1
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
activated = 0
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 2:
return -1
run_sql('UPDATE user SET note=%s where email=%s', (activated, email))
res = run_sql('SELECT id FROM user where email=%s', (email,))
if res:
if CFG_ACCESS_CONTROL_NOTIFY_ADMIN_ABOUT_NEW_ACCOUNTS:
send_new_admin_account_warning(email, CFG_SITE_ADMIN_EMAIL)
return res[0][0]
else:
return None
def registerUser(req, email, passw, nickname, register_without_nickname=False,
login_method=None, ln=CFG_SITE_LANG):
"""Register user with the desired values of NICKNAME, EMAIL and
PASSW.
If REGISTER_WITHOUT_NICKNAME is set to True, then ignore
desired NICKNAME and do not set any. This is suitable for
external authentications so that people can login without
having to register an internal account first.
Return 0 if the registration is successful, 1 if email is not
valid, 2 if nickname is not valid, 3 if email is already in the
database, 4 if nickname is already in the database, 5 when
users cannot register themselves because of the site policy, 6 when the
    site is having problems contacting the user.
If login_method is None or is equal to the key corresponding to local
authentication, then CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS is taken
    into account when deciding the registration behaviour.
"""
# is email valid?
email = email.lower()
if not email_valid_p(email):
return 1
_ = gettext_set_language(ln)
# is email already taken?
res = run_sql("SELECT email FROM user WHERE email=%s", (email,))
if len(res) > 0:
return 3
if register_without_nickname:
# ignore desired nick and use default empty string one:
nickname = ""
else:
# is nickname valid?
if not nickname_valid_p(nickname):
return 2
# is nickname already taken?
res = run_sql("SELECT nickname FROM user WHERE nickname=%s", (nickname,))
if len(res) > 0:
return 4
activated = 1 # By default activated
if not login_method or not CFG_EXTERNAL_AUTHENTICATION[login_method]: # local login
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 2:
return 5
elif CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT:
activated = 2 # Email confirmation required
elif CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 1:
activated = 0 # Administrator confirmation required
if CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT:
address_activation_key = mail_cookie_create_mail_activation(
email,
cookie_timeout=datetime.timedelta(
days=CFG_WEBSESSION_ADDRESS_ACTIVATION_EXPIRE_IN_DAYS
)
)
ip_address = req.remote_host or req.remote_ip
try:
if not send_email(CFG_SITE_SUPPORT_EMAIL, email, _("Account registration at %s") % CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME),
tmpl.tmpl_account_address_activation_email_body(email,
address_activation_key, ip_address, ln)):
return 1
except (smtplib.SMTPException, socket.error):
return 6
# okay, go on and register the user:
user_preference = get_default_user_preferences()
uid = run_sql("INSERT INTO user (nickname, email, password, note, settings, last_login) "
"VALUES (%s,%s,AES_ENCRYPT(email,%s),%s,%s, NOW())",
(nickname, email, passw, activated, serialize_via_marshal(user_preference)))
if activated == 1: # Ok we consider the user as logged in :-)
setUid(req, uid)
return 0
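# Illustrative caller sketch for registerUser() (comment only); the numeric
# codes are the ones documented in the docstring above:
#   ret = registerUser(req, email, passw, nickname)
#   if ret == 0:
#       pass  # registered (and, if already activated, logged in)
#   elif ret in (1, 2):
#       pass  # invalid email / invalid nickname
#   elif ret in (3, 4):
#       pass  # email / nickname already in the database
#   elif ret == 5:
#       pass  # self-registration forbidden by the site policy
#   elif ret == 6:
#       pass  # problem contacting the user by email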
def updateDataUser(uid, email, nickname):
"""
Update user data. Used when a user changed his email or password
or nickname.
"""
email = email.lower()
if email == 'guest':
return 0
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 2:
run_sql("update user set email=%s where id=%s", (email, uid))
if nickname and nickname != '':
run_sql("update user set nickname=%s where id=%s", (nickname, uid))
return 1
def updatePasswordUser(uid, password):
"""Update the password of a user."""
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS < 3:
run_sql("update user set password=AES_ENCRYPT(email,%s) where id=%s", (password, uid))
return 1
def merge_usera_into_userb(id_usera, id_userb):
"""
Merges all the information of usera into userb.
Deletes afterwards any reference to usera.
The information about SQL tables is contained in the CFG_WEBUSER_USER_TABLES
variable.
"""
preferencea = get_user_preferences(id_usera)
preferenceb = get_user_preferences(id_userb)
preferencea.update(preferenceb)
set_user_preferences(id_userb, preferencea)
try:
## FIXME: for the time being, let's disable locking
        ## until we move to InnoDB and have
        ## real transactions
#for table, dummy in CFG_WEBUSER_USER_TABLES:
#run_sql("LOCK TABLE %s WRITE" % table)
## Special treatment for BibAuthorID
from invenio.bibauthorid_dbinterface import webuser_merge_user
webuser_merge_user(id_usera, id_userb)
index = 0
table = ''
try:
for index, (table, column) in enumerate(CFG_WEBUSER_USER_TABLES):
run_sql("UPDATE %(table)s SET %(column)s=%%s WHERE %(column)s=%%s; DELETE FROM %(table)s WHERE %(column)s=%%s;" % {
'table': table,
'column': column
}, (id_userb, id_usera, id_usera))
except Exception, err:
msg = "Error when merging id_user=%s into id_userb=%s for table %s: %s\n" % (id_usera, id_userb, table, err)
msg += "users where succesfully already merged for tables: %s\n" % ', '.join([table[0] for table in CFG_WEBUSER_USER_TABLES[:index]])
msg += "users where not succesfully already merged for tables: %s\n" % ', '.join([table[0] for table in CFG_WEBUSER_USER_TABLES[index:]])
register_exception(alert_admin=True, prefix=msg)
raise
finally:
## FIXME: locking disabled
#run_sql("UNLOCK TABLES")
pass
def loginUser(req, p_un, p_pw, login_method):
"""It is a first simple version for the authentication of user. It returns the id of the user,
for checking afterwards if the login is correct
"""
# p_un passed may be an email or a nickname:
p_email = get_email_from_username(p_un)
# go on with the old stuff based on p_email:
if not login_method in CFG_EXTERNAL_AUTHENTICATION:
return (None, p_email, p_pw, 12)
if CFG_EXTERNAL_AUTHENTICATION[login_method]: # External Authentication
try:
result = CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(p_email, p_pw, req)
if (result == (None, None) or result is None) and not login_method in ['oauth1', 'oauth2', 'openid']:
# There is no need to call auth_user with username for
# OAuth1, OAuth2 and OpenID authentication
                result = CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(p_un, p_pw, req) ## We try to log in with either the email or the nickname
if isinstance(result, (tuple, list)) and len(result) == 2:
p_email, p_extid = result
else:
## For backward compatibility we use the email as external
## identifier if it was not returned already by the plugin
p_email, p_extid = str(result), str(result)
if p_email:
p_email = p_email.lower()
if not p_extid:
p_extid = p_email
elif not p_extid:
try:
# OpenID and OAuth authentications have own error messages
return (None, p_email, p_pw, CFG_EXTERNAL_AUTHENTICATION[login_method].get_msg(req))
except NotImplementedError:
return(None, p_email, p_pw, 15)
else:
                # External login is successful but couldn't fetch the email
# address.
generate_string = lambda: reduce((lambda x, y: x+y), [random.choice("qwertyuiopasdfghjklzxcvbnm1234567890") for i in range(32)])
random_string = generate_string()
p_email = CFG_TEMP_EMAIL_ADDRESS % random_string
while run_sql("SELECT * FROM user WHERE email=%s", (p_email,)):
random_string = generate_string()
p_email = CFG_TEMP_EMAIL_ADDRESS % random_string
except InvenioWebAccessExternalAuthError:
register_exception(req=req, alert_admin=True)
raise
        if p_email: # Authenticated externally
query_result = run_sql("SELECT id_user FROM userEXT WHERE id=%s and method=%s", (p_extid, login_method))
if query_result:
## User was already registered with this external method.
id_user = query_result[0][0]
old_email = run_sql("SELECT email FROM user WHERE id=%s", (id_user,))[0][0]
# Look if the email address matches with the template given.
# If it matches, use the email address saved in the database.
regexp = re.compile(CFG_TEMP_EMAIL_ADDRESS % r"\w*")
if regexp.match(p_email):
p_email = old_email
if old_email != p_email:
## User has changed email of reference.
res = run_sql("SELECT id FROM user WHERE email=%s", (p_email,))
if res:
## User was also registered with the other email.
## We should merge the two!
new_id = res[0][0]
if new_id == id_user:
raise AssertionError("We should not reach this situation: new_id=%s, id_user=%s, old_email=%s, p_email=%s" % (new_id, id_user, old_email, p_email))
merge_usera_into_userb(id_user, new_id)
run_sql("DELETE FROM user WHERE id=%s", (id_user, ))
for row in run_sql("SELECT method FROM userEXT WHERE id_user=%s", (id_user, )):
## For all known accounts of id_user not conflicting with new_id we move them to refer to new_id
if not run_sql("SELECT method FROM userEXT WHERE id_user=%s AND method=%s", (new_id, row[0])):
run_sql("UPDATE userEXT SET id_user=%s WHERE id_user=%s AND method=%s", (new_id, id_user, row[0]))
## And we delete the duplicate remaining ones :-)
run_sql("DELETE FROM userEXT WHERE id_user=%s", (id_user, ))
id_user = new_id
else:
## We just need to rename the email address of the
## corresponding user. Unfortunately the local
                        ## password will then be invalid, but it's unlikely
## the user is using both an external and a local
## account.
run_sql("UPDATE user SET email=%s WHERE id=%s", (p_email, id_user))
else:
## User was not already registered with this external method.
query_result = run_sql("SELECT id FROM user WHERE email=%s", (p_email, ))
if query_result:
## The user was already known with this email
id_user = query_result[0][0]
                    ## We fix the inconsistency in the userEXT table.
run_sql("INSERT INTO userEXT(id, method, id_user) VALUES(%s, %s, %s) ON DUPLICATE KEY UPDATE id=%s, method=%s, id_user=%s", (p_extid, login_method, id_user, p_extid, login_method, id_user))
else:
## First time user
p_pw_local = int(random.random() * 1000000)
p_nickname = ''
if CFG_EXTERNAL_AUTHENTICATION[login_method].enforce_external_nicknames:
try: # Let's discover the external nickname!
p_nickname = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_nickname(p_email, p_pw, req)
except (AttributeError, NotImplementedError):
pass
except:
register_exception(req=req, alert_admin=True)
raise
res = registerUser(req, p_email, p_pw_local, p_nickname,
register_without_nickname=p_nickname == '',
login_method=login_method)
if res == 4 or res == 2: # The nickname was already taken
res = registerUser(req, p_email, p_pw_local, '',
register_without_nickname=True,
login_method=login_method)
query_result = run_sql("SELECT id from user where email=%s", (p_email,))
id_user = query_result[0][0]
elif res == 0: # Everything was ok, with or without nickname.
query_result = run_sql("SELECT id from user where email=%s", (p_email,))
id_user = query_result[0][0]
elif res == 6: # error in contacting the user via email
return (None, p_email, p_pw_local, 19)
else:
return (None, p_email, p_pw_local, 13)
run_sql("INSERT INTO userEXT(id, method, id_user) VALUES(%s, %s, %s)", (p_extid, login_method, id_user))
if CFG_EXTERNAL_AUTHENTICATION[login_method].enforce_external_nicknames:
## Let's still fetch a possibly upgraded nickname.
try: # Let's discover the external nickname!
p_nickname = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_nickname(p_email, p_pw, req)
if nickname_valid_p(p_nickname) and nicknameUnique(p_nickname) == 0:
updateDataUser(id_user, p_email, p_nickname)
except (AttributeError, NotImplementedError):
pass
except:
register_exception(alert_admin=True)
raise
try:
groups = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_groups_membership(p_email, p_pw, req)
# groups is a dictionary {group_name : group_description,}
new_groups = {}
for key, value in groups.items():
new_groups[key + " [" + str(login_method) + "]"] = value
groups = new_groups
except (AttributeError, NotImplementedError):
pass
except:
register_exception(req=req, alert_admin=True)
return (None, p_email, p_pw, 16)
else: # Groups synchronization
if groups:
from invenio.webgroup import synchronize_external_groups
synchronize_external_groups(id_user, groups, login_method)
user_prefs = get_user_preferences(id_user)
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
## I.e. if the login method is not of robot type:
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS >= 4:
                    # Let's prevent the user from switching login_method
if user_prefs.has_key("login_method") and \
user_prefs["login_method"] != login_method:
return (None, p_email, p_pw, 11)
user_prefs["login_method"] = login_method
# Cleaning external settings
for key in user_prefs.keys():
if key.startswith('EXTERNAL_'):
del user_prefs[key]
try:
# Importing external settings
new_prefs = CFG_EXTERNAL_AUTHENTICATION[login_method].fetch_user_preferences(p_email, p_pw, req)
for key, value in new_prefs.items():
user_prefs['EXTERNAL_' + key] = value
except (AttributeError, NotImplementedError):
pass
except InvenioWebAccessExternalAuthError:
register_exception(req=req, alert_admin=True)
return (None, p_email, p_pw, 16)
# Storing settings
set_user_preferences(id_user, user_prefs)
else:
return (None, p_un, p_pw, 10)
    else: # Internal Authentication
if not p_pw:
p_pw = ''
query_result = run_sql("SELECT id,email,note from user where email=%s and password=AES_ENCRYPT(email,%s)", (p_email, p_pw,))
if query_result:
#FIXME drop external groups and settings
note = query_result[0][2]
id_user = query_result[0][0]
if note == '1': # Good account
preferred_login_method = get_user_preferences(query_result[0][0])['login_method']
p_email = query_result[0][1].lower()
if login_method != preferred_login_method:
if preferred_login_method in CFG_EXTERNAL_AUTHENTICATION:
return (None, p_email, p_pw, 11)
elif note == '2': # Email address need to be confirmed by user
return (None, p_email, p_pw, 17)
elif note == '0': # Account need to be confirmed by administrator
return (None, p_email, p_pw, 18)
else:
return (None, p_email, p_pw, 14)
# Login successful! Updating the last access time
run_sql("UPDATE user SET last_login=NOW() WHERE email=%s", (p_email,))
return (id_user, p_email, p_pw, 0)
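# Illustrative caller sketch for loginUser() (comment only; 'Local' stands in
# for whatever key CFG_EXTERNAL_AUTHENTICATION actually defines for internal
# authentication): the function returns a 4-tuple
# (uid_or_None, email, password, code), where code 0 means the login succeeded
# and any non-zero code identifies the specific failure.
#   (uid, email, password, code) = loginUser(req, username, password, 'Local')
#   if uid is not None and code == 0:
#       update_Uid(req, email)  # promote the session to the logged-in user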
def drop_external_settings(userId):
"""Drop the external (EXTERNAL_) settings of userid."""
prefs = get_user_preferences(userId)
for key in prefs.keys():
if key.startswith('EXTERNAL_'):
del prefs[key]
set_user_preferences(userId, prefs)
def logoutUser(req):
"""It logout the user of the system, creating a guest user.
"""
session = get_session(req)
if CFG_WEBSESSION_DIFFERENTIATE_BETWEEN_GUESTS:
uid = createGuestUser()
session['uid'] = uid
session.set_remember_me(False)
session.save()
else:
uid = 0
session.invalidate()
if hasattr(req, '_user_info'):
delattr(req, '_user_info')
return uid
def username_exists_p(username):
"""Check if USERNAME exists in the system. Username may be either
nickname or email.
Return 1 if it does exist, 0 if it does not.
"""
if username == "":
        # report "does not exist" when asked about guest users
return 0
res = run_sql("SELECT email FROM user WHERE email=%s", (username,)) + \
run_sql("SELECT email FROM user WHERE nickname=%s", (username,))
if len(res) > 0:
return 1
return 0
def emailUnique(p_email):
"""Check if the email address only exists once. If yes, return userid, if not, -1
"""
query_result = run_sql("select id, email from user where email=%s", (p_email,))
if len(query_result) == 1:
return query_result[0][0]
elif len(query_result) == 0:
return 0
return -1
def nicknameUnique(p_nickname):
"""Check if the nickname only exists once. If yes, return userid, if not, -1
"""
query_result = run_sql("select id, nickname from user where nickname=%s", (p_nickname,))
if len(query_result) == 1:
return query_result[0][0]
elif len(query_result) == 0:
return 0
return -1
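# Illustrative interpretation of the two uniqueness helpers above (comment
# only; the address is a sample value):
#   emailUnique('user@example.org')  ->  uid  if exactly one account uses it
#                                    ->  0    if no account uses it
#                                    ->  -1   if several accounts use it
# nicknameUnique() follows the same convention for nicknames.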
def update_Uid(req, p_email, remember_me=False):
"""It updates the userId of the session. It is used when a guest user is logged in succesfully in the system with a given email and password.
As a side effect it will discover all the restricted collection to which the user has right to
"""
query_ID = int(run_sql("select id from user where email=%s",
(p_email,))[0][0])
setUid(req, query_ID, remember_me)
return query_ID
def send_new_admin_account_warning(new_account_email, send_to, ln=CFG_SITE_LANG):
"""Send an email to the address given by send_to about the new account new_account_email."""
_ = gettext_set_language(ln)
sub = _("New account on") + " '%s'" % CFG_SITE_NAME
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
sub += " - " + _("PLEASE ACTIVATE")
body = _("A new account has been created on") + " '%s'" % CFG_SITE_NAME
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS == 1:
body += _(" and is awaiting activation")
body += ":\n\n"
body += _(" Username/Email") + ": %s\n\n" % new_account_email
body += _("You can approve or reject this account request at") + ": %s/admin/webaccess/webaccessadmin.py/manageaccounts\n" % CFG_SITE_URL
return send_email(CFG_SITE_SUPPORT_EMAIL, send_to, subject=sub, content=body)
def get_email(uid):
"""Return email address of the user uid. Return string 'guest' in case
the user is not found."""
out = "guest"
res = run_sql("SELECT email FROM user WHERE id=%s", (uid,), 1)
if res and res[0][0]:
out = res[0][0].lower()
return out
def get_email_from_username(username):
"""Return email address of the user corresponding to USERNAME.
The username may be either nickname or email. Return USERNAME
untouched if not found in the database or if found several
matching entries.
"""
if username == '':
return ''
out = username
res = run_sql("SELECT email FROM user WHERE email=%s", (username,), 1) + \
run_sql("SELECT email FROM user WHERE nickname=%s", (username,), 1)
if res and len(res) == 1:
out = res[0][0].lower()
return out
#def get_password(uid):
#"""Return password of the user uid. Return None in case
#the user is not found."""
#out = None
#res = run_sql("SELECT password FROM user WHERE id=%s", (uid,), 1)
#if res and res[0][0] != None:
#out = res[0][0]
#return out
def get_nickname(uid):
"""Return nickname of the user uid. Return None in case
the user is not found."""
out = None
res = run_sql("SELECT nickname FROM user WHERE id=%s", (uid,), 1)
if res and res[0][0]:
out = res[0][0]
return out
def get_nickname_or_email(uid):
"""Return nickname (preferred) or the email address of the user uid.
Return string 'guest' in case the user is not found."""
out = "guest"
res = run_sql("SELECT nickname, email FROM user WHERE id=%s", (uid,), 1)
if res and res[0]:
if res[0][0]:
out = res[0][0]
elif res[0][1]:
out = res[0][1].lower()
return out
def create_userinfobox_body(req, uid, language="en"):
"""Create user info box body for user UID in language LANGUAGE."""
if req:
if req.is_https():
url_referer = CFG_SITE_SECURE_URL + req.unparsed_uri
else:
url_referer = CFG_SITE_URL + req.unparsed_uri
if '/youraccount/logout' in url_referer:
url_referer = ''
else:
url_referer = CFG_SITE_URL
user_info = collect_user_info(req)
try:
return tmpl.tmpl_create_userinfobox(ln=language,
url_referer=url_referer,
guest=int(user_info['guest']),
username=get_nickname_or_email(uid),
submitter=user_info['precached_viewsubmissions'],
referee=user_info['precached_useapprove'],
admin=user_info['precached_useadmin'],
usebaskets=user_info['precached_usebaskets'],
usemessages=user_info['precached_usemessages'],
usealerts=user_info['precached_usealerts'],
usegroups=user_info['precached_usegroups'],
useloans=user_info['precached_useloans'],
usestats=user_info['precached_usestats']
)
except OperationalError:
return ""
def create_useractivities_menu(req, uid, navmenuid, ln="en"):
"""Create user activities menu.
@param req: request object
@param uid: user id
@type uid: int
@param navmenuid: the section of the website this page belongs (search, submit, baskets, etc.)
@type navmenuid: string
@param ln: language
@type ln: string
@return: HTML menu of the user activities
@rtype: string
"""
if req:
if req.is_https():
url_referer = CFG_SITE_SECURE_URL + req.unparsed_uri
else:
url_referer = CFG_SITE_URL + req.unparsed_uri
if '/youraccount/logout' in url_referer:
url_referer = ''
else:
url_referer = CFG_SITE_URL
user_info = collect_user_info(req)
is_user_menu_selected = False
if navmenuid == 'personalize' or \
navmenuid.startswith('your') and \
navmenuid != 'youraccount':
is_user_menu_selected = True
try:
return tmpl.tmpl_create_useractivities_menu(
ln=ln,
selected=is_user_menu_selected,
url_referer=url_referer,
guest=int(user_info['guest']),
username=get_nickname_or_email(uid),
submitter=user_info['precached_viewsubmissions'],
referee=user_info['precached_useapprove'],
admin=user_info['precached_useadmin'],
usebaskets=user_info['precached_usebaskets'],
usemessages=user_info['precached_usemessages'],
usealerts=user_info['precached_usealerts'],
usegroups=user_info['precached_usegroups'],
useloans=user_info['precached_useloans'],
usestats=user_info['precached_usestats'],
usecomments=user_info['precached_sendcomments'],
)
except OperationalError:
return ""
def create_adminactivities_menu(req, uid, navmenuid, ln="en"):
"""Create admin activities menu.
@param req: request object
@param uid: user id
@type uid: int
@param navmenuid: the section of the website this page belongs (search, submit, baskets, etc.)
@type navmenuid: string
@param ln: language
@type ln: string
@return: HTML menu of the user activities
@rtype: string
"""
_ = gettext_set_language(ln)
if req:
if req.is_https():
url_referer = CFG_SITE_SECURE_URL + req.unparsed_uri
else:
url_referer = CFG_SITE_URL + req.unparsed_uri
if '/youraccount/logout' in url_referer:
url_referer = ''
else:
url_referer = CFG_SITE_URL
user_info = collect_user_info(req)
activities = acc_find_possible_activities(user_info, ln)
# For BibEdit and BibDocFile menu items, take into consideration
# current record whenever possible
if activities.has_key(_("Run Record Editor")) or \
activities.has_key(_("Run Document File Manager")) and \
user_info['uri'].startswith('/' + CFG_SITE_RECORD + '/'):
try:
# Get record ID and try to cast it to an int
current_record_id = int(urlparse.urlparse(user_info['uri'])[2].split('/')[2])
except:
pass
else:
if activities.has_key(_("Run Record Editor")):
activities[_("Run Record Editor")] = activities[_("Run Record Editor")] + '&#state=edit&recid=' + str(current_record_id)
if activities.has_key(_("Run Document File Manager")):
activities[_("Run Document File Manager")] = activities[_("Run Document File Manager")] + '&recid=' + str(current_record_id)
try:
return tmpl.tmpl_create_adminactivities_menu(
ln=ln,
selected=navmenuid == 'admin',
url_referer=url_referer,
guest=int(user_info['guest']),
username=get_nickname_or_email(uid),
submitter=user_info['precached_viewsubmissions'],
referee=user_info['precached_useapprove'],
admin=user_info['precached_useadmin'],
usebaskets=user_info['precached_usebaskets'],
usemessages=user_info['precached_usemessages'],
usealerts=user_info['precached_usealerts'],
usegroups=user_info['precached_usegroups'],
useloans=user_info['precached_useloans'],
usestats=user_info['precached_usestats'],
activities=activities
)
except OperationalError:
return ""
def list_registered_users():
"""List all registered users."""
return run_sql("SELECT id,email FROM user where email!=''")
def list_users_in_role(role):
"""List all users of a given role (see table accROLE)
@param role: role of user (string)
@return: list of uids
"""
res = run_sql("""SELECT uacc.id_user
FROM user_accROLE uacc JOIN accROLE acc
ON uacc.id_accROLE=acc.id
WHERE acc.name=%s""",
(role,), run_on_slave=True)
if res:
return map(lambda x: int(x[0]), res)
return []
def list_users_in_roles(role_list):
"""List all users of given roles (see table accROLE)
@param role_list: list of roles [string]
@return: list of uids
"""
if not(type(role_list) is list or type(role_list) is tuple):
role_list = [role_list]
query = """SELECT DISTINCT(uacc.id_user)
FROM user_accROLE uacc JOIN accROLE acc
ON uacc.id_accROLE=acc.id
"""
query_addons = ""
query_params = ()
if len(role_list) > 0:
query_params = role_list
query_addons = " WHERE "
for role in role_list[:-1]:
query_addons += "acc.name=%s OR "
query_addons += "acc.name=%s"
res = run_sql(query + query_addons, query_params, run_on_slave=True)
if res:
return map(lambda x: int(x[0]), res)
return []
def get_uid_based_on_pref(prefname, prefvalue):
"""get the user's UID based where his/her preference prefname has value prefvalue in preferences"""
prefs = run_sql("SELECT id, settings FROM user WHERE settings is not NULL")
the_uid = None
for pref in prefs:
try:
settings = deserialize_via_marshal(pref[1])
if (settings.has_key(prefname)) and (settings[prefname] == prefvalue):
the_uid = pref[0]
except:
pass
return the_uid
def get_user_preferences(uid):
pref = run_sql("SELECT id, settings FROM user WHERE id=%s", (uid,))
if pref:
try:
return deserialize_via_marshal(pref[0][1])
except:
pass
    return get_default_user_preferences() # an empty dict means no preferences
def set_user_preferences(uid, pref):
assert(type(pref) == type({}))
run_sql("UPDATE user SET settings=%s WHERE id=%s",
(serialize_via_marshal(pref), uid))
def get_default_user_preferences():
user_preference = {
'login_method': ''}
if CFG_EXTERNAL_AUTH_DEFAULT in CFG_EXTERNAL_AUTHENTICATION:
user_preference['login_method'] = CFG_EXTERNAL_AUTH_DEFAULT
return user_preference
def get_preferred_user_language(req):
def _get_language_from_req_header(accept_language_header):
"""Extract langs info from req.headers_in['Accept-Language'] which
should be set to something similar to:
'fr,en-us;q=0.7,en;q=0.3'
"""
tmp_langs = {}
for lang in accept_language_header.split(','):
lang = lang.split(';q=')
if len(lang) == 2:
lang[1] = lang[1].replace('"', '') # Hack for Yeti robot
try:
tmp_langs[float(lang[1])] = lang[0]
except ValueError:
pass
else:
tmp_langs[1.0] = lang[0]
ret = []
priorities = tmp_langs.keys()
priorities.sort()
priorities.reverse()
for priority in priorities:
ret.append(tmp_langs[priority])
return ret
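    # Illustrative parse (comment only): an Accept-Language header such as
    # 'fr,en-us;q=0.7,en;q=0.3' yields ['fr', 'en-us', 'en'], i.e. languages
    # ordered by descending q-value, with a missing q defaulting to 1.0.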
uid = getUid(req)
guest = isGuestUser(uid)
new_lang = None
preferred_lang = None
if not guest:
user_preferences = get_user_preferences(uid)
preferred_lang = new_lang = user_preferences.get('language', None)
if not new_lang:
try:
new_lang = wash_languages(cgi.parse_qs(req.args)['ln'])
except (TypeError, AttributeError, KeyError):
pass
if not new_lang:
try:
new_lang = wash_languages(_get_language_from_req_header(req.headers_in['Accept-Language']))
except (TypeError, AttributeError, KeyError):
pass
new_lang = wash_language(new_lang)
if new_lang != preferred_lang and not guest:
user_preferences['language'] = new_lang
set_user_preferences(uid, user_preferences)
return new_lang
def collect_user_info(req, login_time=False, refresh=False):
"""Given the mod_python request object rec or a uid it returns a dictionary
containing at least the keys uid, nickname, email, groups, plus any external keys in
the user preferences (collected at login time and built by the different
external authentication plugins) and if the mod_python request object is
provided, also the remote_ip, remote_host, referer, agent fields.
NOTE: if req is a mod_python request object, the user_info dictionary
    is saved into req._user_info (for caching purposes)
setApacheUser & setUid will properly reset it.
"""
from invenio.search_engine import get_permitted_restricted_collections
user_info = {
'remote_ip' : '',
'remote_host' : '',
'referer' : '',
'uri' : '',
'agent' : '',
'uid' :-1,
'nickname' : '',
'email' : '',
'group' : [],
'guest' : '1',
'session' : None,
'precached_permitted_restricted_collections' : [],
'precached_usebaskets' : False,
'precached_useloans' : False,
'precached_usegroups' : False,
'precached_usealerts' : False,
'precached_usemessages' : False,
'precached_viewsubmissions' : False,
'precached_useapprove' : False,
'precached_useadmin' : False,
'precached_usestats' : False,
'precached_viewclaimlink' : False,
'precached_usepaperclaim' : False,
'precached_usepaperattribution' : False,
'precached_canseehiddenmarctags' : False,
'precached_sendcomments' : False,
}
try:
is_req = False
if not req:
uid = -1
elif type(req) in (type(1), type(1L)):
            ## req is in fact a user identification
uid = req
elif type(req) is dict:
## req is by mistake already a user_info
try:
assert(req.has_key('uid'))
assert(req.has_key('email'))
assert(req.has_key('nickname'))
except AssertionError:
## mmh... misuse of collect_user_info. Better warn the admin!
register_exception(alert_admin=True)
user_info.update(req)
return user_info
else:
is_req = True
uid = getUid(req)
if hasattr(req, '_user_info') and not login_time:
user_info = req._user_info
if not refresh:
return req._user_info
req._user_info = user_info
try:
user_info['remote_ip'] = req.remote_ip
except gaierror:
#FIXME: we should support IPV6 too. (hint for FireRole)
pass
user_info['session'] = get_session(req).sid()
user_info['remote_host'] = req.remote_host or ''
user_info['referer'] = req.headers_in.get('Referer', '')
user_info['uri'] = req.unparsed_uri or ()
user_info['agent'] = req.headers_in.get('User-Agent', 'N/A')
user_info['uid'] = uid
user_info['nickname'] = get_nickname(uid) or ''
user_info['email'] = get_email(uid) or ''
user_info['group'] = []
user_info['guest'] = str(isGuestUser(uid))
if user_info['guest'] == '1' and CFG_INSPIRE_SITE:
usepaperattribution = False
viewclaimlink = False
if (CFG_BIBAUTHORID_ENABLED
and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionviewers"))):
usepaperattribution = True
# if (CFG_BIBAUTHORID_ENABLED
# and usepaperattribution
# and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionlinkviewers"))):
# viewclaimlink = True
if is_req:
session = get_session(req)
viewlink = False
try:
viewlink = session['personinfo']['claim_in_process']
except (KeyError, TypeError):
viewlink = False
else:
viewlink = False
if (CFG_BIBAUTHORID_ENABLED
and usepaperattribution
and viewlink):
viewclaimlink = True
user_info['precached_viewclaimlink'] = viewclaimlink
user_info['precached_usepaperattribution'] = usepaperattribution
if user_info['guest'] == '0':
user_info['group'] = [group[1] for group in get_groups(uid)]
prefs = get_user_preferences(uid)
login_method = prefs['login_method']
## NOTE: we fall back to default login_method if the login_method
## specified in the user settings does not exist (e.g. after
## a migration.)
login_object = CFG_EXTERNAL_AUTHENTICATION.get(login_method, CFG_EXTERNAL_AUTHENTICATION[CFG_EXTERNAL_AUTH_DEFAULT])
if login_object and ((datetime.datetime.now() - get_last_login(uid)).seconds > 3600):
                ## The user uses an external authentication method and it has
                ## been a while since her last login
if not CFG_EXTERNAL_AUTH_USING_SSO or (
is_req and login_object.in_shibboleth(req)):
## If we're using SSO we must be sure to be in HTTPS and Shibboleth handler
## otherwise we can't really read anything, hence
                    ## it's better to skip the synchronization
try:
groups = login_object.fetch_user_groups_membership(user_info['email'], req=req)
# groups is a dictionary {group_name : group_description,}
new_groups = {}
for key, value in groups.items():
new_groups[key + " [" + str(login_method) + "]"] = value
groups = new_groups
except (AttributeError, NotImplementedError, TypeError, InvenioWebAccessExternalAuthError):
pass
else: # Groups synchronization
from invenio.webgroup import synchronize_external_groups
synchronize_external_groups(uid, groups, login_method)
user_info['group'] = [group[1] for group in get_groups(uid)]
try:
# Importing external settings
new_prefs = login_object.fetch_user_preferences(user_info['email'], req=req)
for key, value in new_prefs.items():
prefs['EXTERNAL_' + key] = value
except (AttributeError, NotImplementedError, TypeError, InvenioWebAccessExternalAuthError):
pass
else:
set_user_preferences(uid, prefs)
prefs = get_user_preferences(uid)
run_sql('UPDATE user SET last_login=NOW() WHERE id=%s', (uid,))
if prefs:
for key, value in prefs.iteritems():
user_info[key.lower()] = value
if login_time:
## Heavy computational information
from invenio.access_control_engine import acc_authorize_action
user_info['precached_permitted_restricted_collections'] = get_permitted_restricted_collections(user_info)
user_info['precached_usebaskets'] = acc_authorize_action(user_info, 'usebaskets')[0] == 0
user_info['precached_useloans'] = acc_authorize_action(user_info, 'useloans')[0] == 0
user_info['precached_usegroups'] = acc_authorize_action(user_info, 'usegroups')[0] == 0
user_info['precached_usealerts'] = acc_authorize_action(user_info, 'usealerts')[0] == 0
user_info['precached_usemessages'] = acc_authorize_action(user_info, 'usemessages')[0] == 0
user_info['precached_usestats'] = acc_authorize_action(user_info, 'runwebstatadmin')[0] == 0
user_info['precached_viewsubmissions'] = isUserSubmitter(user_info)
user_info['precached_useapprove'] = isUserReferee(user_info)
user_info['precached_useadmin'] = isUserAdmin(user_info)
user_info['precached_canseehiddenmarctags'] = acc_authorize_action(user_info, 'runbibedit')[0] == 0
user_info['precached_sendcomments'] = acc_authorize_action(user_info, 'sendcomment', '*')[0] == 0
usepaperclaim = False
usepaperattribution = False
viewclaimlink = False
if (CFG_BIBAUTHORID_ENABLED
and acc_is_user_in_role(user_info, acc_get_role_id("paperclaimviewers"))):
usepaperclaim = True
if (CFG_BIBAUTHORID_ENABLED
and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionviewers"))):
usepaperattribution = True
if is_req:
session = get_session(req)
viewlink = False
try:
viewlink = session['personinfo']['claim_in_process']
except (KeyError, TypeError):
viewlink = False
else:
viewlink = False
if (CFG_BIBAUTHORID_ENABLED
and usepaperattribution
and viewlink):
viewclaimlink = True
# if (CFG_BIBAUTHORID_ENABLED
# and ((usepaperclaim or usepaperattribution)
# and acc_is_user_in_role(user_info, acc_get_role_id("paperattributionlinkviewers")))):
# viewclaimlink = True
user_info['precached_viewclaimlink'] = viewclaimlink
user_info['precached_usepaperclaim'] = usepaperclaim
user_info['precached_usepaperattribution'] = usepaperattribution
except Exception, e:
register_exception()
return user_info
| gpl-2.0 |
iameli/kubernetes | cluster/juju/layers/kubernetes/reactive/k8s.py | 27 | 21547 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shlex import split
from subprocess import call
from subprocess import check_call
from subprocess import check_output
from charms.docker.compose import Compose
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_any
from charms.reactive import when_not
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import is_leader
from charmhelpers.core.hookenv import status_set
from charmhelpers.core.hookenv import leader_set
from charmhelpers.core.hookenv import leader_get
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.core.host import chdir
import tlslib
@when('leadership.is_leader')
def i_am_leader():
'''The leader is the Kubernetes master node. '''
leader_set({'master-address': hookenv.unit_private_ip()})
@when_not('tls.client.authorization.required')
def configure_easrsa():
'''Require the tls layer to generate certificates with "clientAuth". '''
# By default easyrsa generates the server certificates without clientAuth
# Setting this state before easyrsa is configured ensures the tls layer is
# configured to generate certificates with client authentication.
set_state('tls.client.authorization.required')
domain = hookenv.config().get('dns_domain')
cidr = hookenv.config().get('cidr')
sdn_ip = get_sdn_ip(cidr)
# Create extra sans that the tls layer will add to the server cert.
extra_sans = [
sdn_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
unitdata.kv().set('extra_sans', extra_sans)
@hook('config-changed')
def config_changed():
'''If the configuration values change, remove the available states.'''
config = hookenv.config()
if any(config.changed(key) for key in config.keys()):
hookenv.log('The configuration options have changed.')
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
if is_leader():
hookenv.log('Removing master container and kubelet.available state.') # noqa
# Stop and remove the Kubernetes kubelet container.
compose.kill('master')
compose.rm('master')
compose.kill('proxy')
compose.rm('proxy')
# Remove the state so the code can react to restarting kubelet.
remove_state('kubelet.available')
else:
hookenv.log('Removing kubelet container and kubelet.available state.') # noqa
# Stop and remove the Kubernetes kubelet container.
compose.kill('kubelet')
compose.rm('kubelet')
# Remove the state so the code can react to restarting kubelet.
remove_state('kubelet.available')
hookenv.log('Removing proxy container and proxy.available state.')
# Stop and remove the Kubernetes proxy container.
compose.kill('proxy')
compose.rm('proxy')
# Remove the state so the code can react to restarting proxy.
remove_state('proxy.available')
if config.changed('version'):
hookenv.log('The version changed removing the states so the new '
'version of kubectl will be downloaded.')
remove_state('kubectl.downloaded')
remove_state('kubeconfig.created')
@when('tls.server.certificate available')
@when_not('k8s.server.certificate available')
def server_cert():
'''When the server certificate is available, get the server certificate
from the charm unitdata and write it to the kubernetes directory. '''
server_cert = '/srv/kubernetes/server.crt'
server_key = '/srv/kubernetes/server.key'
# Save the server certificate from unit data to the destination.
tlslib.server_cert(None, server_cert, user='ubuntu', group='ubuntu')
# Copy the server key from the default location to the destination.
tlslib.server_key(None, server_key, user='ubuntu', group='ubuntu')
set_state('k8s.server.certificate available')
@when('tls.client.certificate available')
@when_not('k8s.client.certficate available')
def client_cert():
'''When the client certificate is available, get the client certificate
from the charm unitdata and write it to the kubernetes directory. '''
client_cert = '/srv/kubernetes/client.crt'
client_key = '/srv/kubernetes/client.key'
# Save the client certificate from the default location to the destination.
tlslib.client_cert(None, client_cert, user='ubuntu', group='ubuntu')
# Copy the client key from the default location to the destination.
tlslib.client_key(None, client_key, user='ubuntu', group='ubuntu')
set_state('k8s.client.certficate available')
@when('tls.certificate.authority available')
@when_not('k8s.certificate.authority available')
def ca():
'''When the Certificate Authority is available, copy the CA from the
default location to the /srv/kubernetes directory. '''
ca_crt = '/srv/kubernetes/ca.crt'
# Copy the Certificate Authority to the destination directory.
tlslib.ca(None, ca_crt, user='ubuntu', group='ubuntu')
set_state('k8s.certificate.authority available')
@when('kubelet.available', 'leadership.is_leader')
@when_not('kubedns.available', 'skydns.available')
def launch_dns():
'''Create the "kube-system" namespace, the kubedns resource controller,
and the kubedns service. '''
hookenv.log('Creating kubernetes kubedns on the master node.')
# Only launch and track this state on the leader.
# Launching duplicate kubeDNS rc will raise an error
# Run a command to check if the apiserver is responding.
return_code = call(split('kubectl cluster-info'))
if return_code != 0:
hookenv.log('kubectl command failed, waiting for apiserver to start.')
remove_state('kubedns.available')
# Return without setting kubedns.available so this method will retry.
return
# Check for the "kube-system" namespace.
return_code = call(split('kubectl get namespace kube-system'))
if return_code != 0:
# Create the kube-system namespace that is used by the kubedns files.
check_call(split('kubectl create namespace kube-system'))
# Check for the kubedns replication controller.
return_code = call(split('kubectl get -f files/manifests/kubedns-controller.yaml'))
if return_code != 0:
# Create the kubedns replication controller from the rendered file.
check_call(split('kubectl create -f files/manifests/kubedns-controller.yaml'))
# Check for the kubedns service.
return_code = call(split('kubectl get -f files/manifests/kubedns-svc.yaml'))
if return_code != 0:
# Create the kubedns service from the rendered file.
check_call(split('kubectl create -f files/manifests/kubedns-svc.yaml'))
set_state('kubedns.available')
@when('skydns.available', 'leadership.is_leader')
def convert_to_kubedns():
'''Delete the skydns containers to make way for the kubedns containers.'''
    hookenv.log('Deleting the old skydns deployment.')
# Delete the skydns replication controller.
return_code = call(split('kubectl delete rc kube-dns-v11'))
# Delete the skydns service.
return_code = call(split('kubectl delete svc kube-dns'))
remove_state('skydns.available')
@when('docker.available')
@when_not('etcd.available')
def relation_message():
    '''Take over messaging to let the user know a relation to the ETCD
    cluster is required before going any further. '''
status_set('waiting', 'Waiting for relation to ETCD')
@when('kubeconfig.created')
@when('etcd.available')
@when_not('kubelet.available', 'proxy.available')
def start_kubelet(etcd):
'''Run the hyperkube container that starts the kubernetes services.
When the leader, run the master services (apiserver, controller, scheduler,
proxy)
using the master.json from the rendered manifest directory.
    When a follower, start the node services (kubelet and proxy). '''
render_files(etcd)
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
status_set('maintenance', 'Starting the Kubernetes services.')
if is_leader():
compose.up('master')
compose.up('proxy')
set_state('kubelet.available')
# Open the secure port for api-server.
hookenv.open_port(6443)
else:
# Start the Kubernetes kubelet container using docker-compose.
compose.up('kubelet')
set_state('kubelet.available')
# Start the Kubernetes proxy container using docker-compose.
compose.up('proxy')
set_state('proxy.available')
status_set('active', 'Kubernetes services started')
@when('docker.available')
@when_not('kubectl.downloaded')
def download_kubectl():
'''Download the kubectl binary to test and interact with the cluster.'''
status_set('maintenance', 'Downloading the kubectl binary')
version = hookenv.config()['version']
cmd = 'wget -nv -O /usr/local/bin/kubectl https://storage.googleapis.com' \
'/kubernetes-release/release/{0}/bin/linux/{1}/kubectl'
cmd = cmd.format(version, arch())
    hookenv.log('Downloading kubectl: {0}'.format(cmd))
check_call(split(cmd))
cmd = 'chmod +x /usr/local/bin/kubectl'
check_call(split(cmd))
set_state('kubectl.downloaded')
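# Illustrative URL produced by the format string above (comment only; the
# version and architecture are sample values, not taken from the charm config):
#   version='v1.2.0', arch()='amd64' yields
#   https://storage.googleapis.com/kubernetes-release/release/v1.2.0/bin/linux/amd64/kubectl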
@when('kubectl.downloaded', 'leadership.is_leader', 'k8s.certificate.authority available', 'k8s.client.certficate available') # noqa
@when_not('kubeconfig.created')
def master_kubeconfig():
'''Create the kubernetes configuration for the master unit. The master
should create a package with the client credentials so the user can
interact securely with the apiserver.'''
hookenv.log('Creating Kubernetes configuration for master node.')
directory = '/srv/kubernetes'
ca = '/srv/kubernetes/ca.crt'
key = '/srv/kubernetes/client.key'
cert = '/srv/kubernetes/client.crt'
# Get the public address of the apiserver so users can access the master.
server = 'https://{0}:{1}'.format(hookenv.unit_public_ip(), '6443')
# Create the client kubeconfig so users can access the master node.
create_kubeconfig(directory, server, ca, key, cert)
# Copy the kubectl binary to this directory.
cmd = 'cp -v /usr/local/bin/kubectl {0}'.format(directory)
check_call(split(cmd))
# Use a context manager to run the tar command in a specific directory.
with chdir(directory):
# Create a package with kubectl and the files to use it externally.
cmd = 'tar -cvzf /home/ubuntu/kubectl_package.tar.gz ca.crt ' \
'client.key client.crt kubectl kubeconfig'
check_call(split(cmd))
# This sets up the client workspace consistently on the leader and nodes.
node_kubeconfig()
set_state('kubeconfig.created')
@when('kubectl.downloaded', 'k8s.certificate.authority available', 'k8s.server.certificate available') # noqa
@when_not('kubeconfig.created', 'leadership.is_leader')
def node_kubeconfig():
'''Create the kubernetes configuration (kubeconfig) for this unit.
    The nodes will create a kubeconfig with the server credentials so
the services can interact securely with the apiserver.'''
hookenv.log('Creating Kubernetes configuration for worker node.')
directory = '/var/lib/kubelet'
ca = '/srv/kubernetes/ca.crt'
cert = '/srv/kubernetes/server.crt'
key = '/srv/kubernetes/server.key'
# Get the private address of the apiserver for communication between units.
server = 'https://{0}:{1}'.format(leader_get('master-address'), '6443')
# Create the kubeconfig for the other services.
kubeconfig = create_kubeconfig(directory, server, ca, key, cert)
# Install the kubeconfig in the root user's home directory.
install_kubeconfig(kubeconfig, '/root/.kube', 'root')
    # Install the kubeconfig in the ubuntu user's home directory.
install_kubeconfig(kubeconfig, '/home/ubuntu/.kube', 'ubuntu')
set_state('kubeconfig.created')
@when('proxy.available')
@when_not('cadvisor.available')
def start_cadvisor():
'''Start the cAdvisor container that gives metrics about the other
application containers on this system. '''
compose = Compose('files/kubernetes')
compose.up('cadvisor')
hookenv.open_port(8088)
status_set('active', 'cadvisor running on port 8088')
set_state('cadvisor.available')
@when('kubelet.available', 'kubeconfig.created')
@when_any('proxy.available', 'cadvisor.available', 'kubedns.available')
def final_message():
'''Issue some final messages when the services are started. '''
# TODO: Run a simple/quick health checks before issuing this message.
status_set('active', 'Kubernetes running.')
def gather_sdn_data():
'''Get the Software Defined Network (SDN) information and return it as a
dictionary. '''
sdn_data = {}
# The dictionary named 'pillar' is a construct of the k8s template files.
pillar = {}
# SDN Providers pass data via the unitdata.kv module
db = unitdata.kv()
# Ideally the DNS address should come from the sdn cidr.
subnet = db.get('sdn_subnet')
if subnet:
# Generate the DNS ip address on the SDN cidr (this is desired).
pillar['dns_server'] = get_dns_ip(subnet)
else:
        # There is no SDN cidr; fall back to the kubernetes config cidr option.
pillar['dns_server'] = get_dns_ip(hookenv.config().get('cidr'))
# The pillar['dns_domain'] value is used in the kubedns-controller.yaml
pillar['dns_domain'] = hookenv.config().get('dns_domain')
# Use a 'pillar' dictionary so we can reuse the upstream kubedns templates.
sdn_data['pillar'] = pillar
return sdn_data
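# Illustrative shape of the returned dictionary (actual values depend on the
# charm configuration and any SDN relation data):
#   {'pillar': {'dns_server': '10.1.0.10', 'dns_domain': 'cluster.local'}}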
def install_kubeconfig(kubeconfig, directory, user):
    '''Copy the kubeconfig file into the given directory, creating the
    directory if necessary, and set ownership for the given user. '''
# The file and directory must be owned by the correct user.
chown = 'chown {0}:{0} {1}'
if not os.path.isdir(directory):
os.makedirs(directory)
# Change the ownership of the config file to the right user.
check_call(split(chown.format(user, directory)))
# kubectl looks for a file named "config" in the ~/.kube directory.
config = os.path.join(directory, 'config')
# Copy the kubeconfig file to the directory renaming it to "config".
cmd = 'cp -v {0} {1}'.format(kubeconfig, config)
check_call(split(cmd))
# Change the ownership of the config file to the right user.
check_call(split(chown.format(user, config)))
def create_kubeconfig(directory, server, ca, key, cert, user='ubuntu'):
'''Create a configuration for kubernetes in a specific directory using
the supplied arguments, return the path to the file.'''
context = 'default-context'
cluster_name = 'kubernetes'
# Ensure the destination directory exists.
if not os.path.isdir(directory):
os.makedirs(directory)
# The configuration file should be in this directory named kubeconfig.
kubeconfig = os.path.join(directory, 'kubeconfig')
# Create the config file with the address of the master server.
cmd = 'kubectl config set-cluster --kubeconfig={0} {1} ' \
'--server={2} --certificate-authority={3}'
check_call(split(cmd.format(kubeconfig, cluster_name, server, ca)))
# Create the credentials using the client flags.
cmd = 'kubectl config set-credentials --kubeconfig={0} {1} ' \
'--client-key={2} --client-certificate={3}'
check_call(split(cmd.format(kubeconfig, user, key, cert)))
# Create a default context with the cluster.
cmd = 'kubectl config set-context --kubeconfig={0} {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster_name, user)))
# Make the config use this new context.
cmd = 'kubectl config use-context --kubeconfig={0} {1}'
check_call(split(cmd.format(kubeconfig, context)))
hookenv.log('kubectl configuration created at {0}.'.format(kubeconfig))
return kubeconfig
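# Illustrative use of the generated file (hypothetical invocation):
#   KUBECONFIG=/srv/kubernetes/kubeconfig kubectl get nodes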
def get_dns_ip(cidr):
'''Get an IP address for the DNS server on the provided cidr.'''
# Remove the range from the cidr.
ip = cidr.split('/')[0]
# Take the last octet off the IP address and replace it with 10.
return '.'.join(ip.split('.')[0:-1]) + '.10'
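# For example, get_dns_ip('10.1.0.0/16') returns '10.1.0.10'.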
def get_sdn_ip(cidr):
'''Get the IP address for the SDN gateway based on the provided cidr.'''
# Remove the range from the cidr.
ip = cidr.split('/')[0]
# Remove the last octet and replace it with 1.
return '.'.join(ip.split('.')[0:-1]) + '.1'
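# For example, get_sdn_ip('10.1.0.0/16') returns '10.1.0.1'.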
def render_files(reldata=None):
'''Use jinja templating to render the docker-compose.yml and master.json
file to contain the dynamic data for the configuration files.'''
context = {}
# Load the context data with SDN data.
context.update(gather_sdn_data())
# Add the charm configuration data to the context.
context.update(hookenv.config())
if reldata:
connection_string = reldata.get_connection_string()
# Define where the etcd tls files will be kept.
etcd_dir = '/etc/ssl/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
# Update the context so the template has the etcd information.
context.update({'etcd_dir': etcd_dir,
'connection_string': connection_string,
'etcd_ca': ca,
'etcd_key': key,
'etcd_cert': cert})
charm_dir = hookenv.charm_dir()
rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
if not os.path.exists(rendered_kube_dir):
os.makedirs(rendered_kube_dir)
rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
if not os.path.exists(rendered_manifest_dir):
os.makedirs(rendered_manifest_dir)
# Update the context with extra values, arch, manifest dir, and private IP.
context.update({'arch': arch(),
'master_address': leader_get('master-address'),
'manifest_directory': rendered_manifest_dir,
'public_address': hookenv.unit_get('public-address'),
'private_address': hookenv.unit_get('private-address')})
# Adapted from: http://kubernetes.io/docs/getting-started-guides/docker/
target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
# Render the files/kubernetes/docker-compose.yml file that contains the
# definition for kubelet and proxy.
render('docker-compose.yml', target, context)
if is_leader():
# Source: https://github.com/kubernetes/...master/cluster/images/hyperkube # noqa
target = os.path.join(rendered_manifest_dir, 'master.json')
# Render the files/manifests/master.json that contains parameters for
# the apiserver, controller, and controller-manager
render('master.json', target, context)
# Source: ...cluster/addons/dns/kubedns-svc.yaml.in
target = os.path.join(rendered_manifest_dir, 'kubedns-svc.yaml')
# Render files/kubernetes/kubedns-svc.yaml for the DNS service.
render('kubedns-svc.yaml', target, context)
# Source: ...cluster/addons/dns/kubedns-controller.yaml.in
target = os.path.join(rendered_manifest_dir, 'kubedns-controller.yaml')
# Render files/kubernetes/kubedns-controller.yaml for the DNS pod.
render('kubedns-controller.yaml', target, context)
def status_set(level, message):
'''Output status message with leadership information.'''
if is_leader():
message = '{0} (master) '.format(message)
hookenv.status_set(level, message)
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
# Validate the architecture is supported by kubernetes.
if architecture not in ['amd64', 'arm', 'arm64', 'ppc64le', 's390x']:
message = 'Unsupported machine architecture: {0}'.format(architecture)
status_set('blocked', message)
raise Exception(message)
return architecture
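# For example, on a 64-bit x86 host dpkg reports 'amd64', so arch() returns 'amd64'.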
| apache-2.0 |
akosyakov/intellij-community | plugins/hg4idea/testData/bin/hgext/gpg.py | 90 | 9365 | # Copyright 2005, 2006 Benoit Boissinot <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''commands to sign and verify changesets'''
import os, tempfile, binascii
from mercurial import util, commands, match, cmdutil
from mercurial import node as hgnode
from mercurial.i18n import _
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
class gpg(object):
def __init__(self, path, key=None):
self.path = path
self.key = (key and " --local-user \"%s\"" % key) or ""
def sign(self, data):
gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
return util.filter(data, gpgcmd)
def verify(self, data, sig):
""" returns of the good and bad signatures"""
sigfile = datafile = None
try:
# create temporary files
fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
fp = os.fdopen(fd, 'wb')
fp.write(sig)
fp.close()
fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
fp = os.fdopen(fd, 'wb')
fp.write(data)
fp.close()
gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
"\"%s\" \"%s\"" % (self.path, sigfile, datafile))
ret = util.filter("", gpgcmd)
finally:
for f in (sigfile, datafile):
try:
if f:
os.unlink(f)
except OSError:
pass
keys = []
key, fingerprint = None, None
err = ""
for l in ret.splitlines():
# see DETAILS in the gnupg documentation
# filter the logger output
if not l.startswith("[GNUPG:]"):
continue
l = l[9:]
if l.startswith("ERRSIG"):
err = _("error while verifying signature")
break
elif l.startswith("VALIDSIG"):
# fingerprint of the primary key
fingerprint = l.split()[10]
elif (l.startswith("GOODSIG") or
l.startswith("EXPSIG") or
l.startswith("EXPKEYSIG") or
l.startswith("BADSIG")):
if key is not None:
keys.append(key + [fingerprint])
key = l.split(" ", 2)
fingerprint = None
if err:
return err, []
if key is not None:
keys.append(key + [fingerprint])
return err, keys
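        # Illustrative return value (made-up key data):
        #   ('', [['GOODSIG', '0123456789ABCDEF', 'Alice <alice@example.com>',
        #          '<primary key fingerprint>']])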
def newgpg(ui, **opts):
"""create a new gpg instance"""
gpgpath = ui.config("gpg", "cmd", "gpg")
gpgkey = opts.get('key')
if not gpgkey:
gpgkey = ui.config("gpg", "key", None)
return gpg(gpgpath, gpgkey)
def sigwalk(repo):
"""
    walk over all signatures, yielding pairs
((node, version, sig), (filename, linenumber))
"""
def parsefile(fileiter, context):
ln = 1
for l in fileiter:
if not l:
continue
yield (l.split(" ", 2), (context, ln))
ln += 1
# read the heads
fl = repo.file(".hgsigs")
for r in reversed(fl.heads()):
fn = ".hgsigs|%s" % hgnode.short(r)
for item in parsefile(fl.read(r).splitlines(), fn):
yield item
try:
# read local signatures
fn = "localsigs"
for item in parsefile(repo.opener(fn), fn):
yield item
except IOError:
pass
def getkeys(ui, repo, mygpg, sigdata, context):
"""get the keys who signed a data"""
fn, ln = context
node, version, sig = sigdata
prefix = "%s:%d" % (fn, ln)
node = hgnode.bin(node)
data = node2txt(repo, node, version)
sig = binascii.a2b_base64(sig)
err, keys = mygpg.verify(data, sig)
if err:
ui.warn("%s:%d %s\n" % (fn, ln , err))
return None
validkeys = []
# warn for expired key and/or sigs
for key in keys:
if key[0] == "BADSIG":
ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
continue
if key[0] == "EXPSIG":
ui.write(_("%s Note: Signature has expired"
" (signed by: \"%s\")\n") % (prefix, key[2]))
elif key[0] == "EXPKEYSIG":
ui.write(_("%s Note: This key has expired"
" (signed by: \"%s\")\n") % (prefix, key[2]))
validkeys.append((key[1], key[2], key[3]))
return validkeys
@command("sigs", [], _('hg sigs'))
def sigs(ui, repo):
"""list signed changesets"""
mygpg = newgpg(ui)
revs = {}
for data, context in sigwalk(repo):
node, version, sig = data
fn, ln = context
try:
n = repo.lookup(node)
except KeyError:
ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
continue
r = repo.changelog.rev(n)
keys = getkeys(ui, repo, mygpg, data, context)
if not keys:
continue
revs.setdefault(r, [])
revs[r].extend(keys)
for rev in sorted(revs, reverse=True):
for k in revs[rev]:
r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
ui.write("%-30s %s\n" % (keystr(ui, k), r))
@command("sigcheck", [], _('hg sigcheck REV'))
def check(ui, repo, rev):
"""verify all the signatures there may be for a particular revision"""
mygpg = newgpg(ui)
rev = repo.lookup(rev)
hexrev = hgnode.hex(rev)
keys = []
for data, context in sigwalk(repo):
node, version, sig = data
if node == hexrev:
k = getkeys(ui, repo, mygpg, data, context)
if k:
keys.extend(k)
if not keys:
ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
return
# print summary
ui.write("%s is signed by:\n" % hgnode.short(rev))
for key in keys:
ui.write(" %s\n" % keystr(ui, key))
def keystr(ui, key):
"""associate a string to a key (username, comment)"""
keyid, user, fingerprint = key
comment = ui.config("gpg", fingerprint, None)
if comment:
return "%s (%s)" % (user, comment)
else:
return user
@command("sign",
[('l', 'local', None, _('make the signature local')),
('f', 'force', None, _('sign even if the sigfile is modified')),
('', 'no-commit', None, _('do not commit the sigfile after signing')),
('k', 'key', '',
_('the key id to sign with'), _('ID')),
('m', 'message', '',
_('commit message'), _('TEXT')),
] + commands.commitopts2,
_('hg sign [OPTION]... [REV]...'))
def sign(ui, repo, *revs, **opts):
"""add a signature for the current or given revision
If no revision is given, the parent of the working directory is used,
or tip if no revision is checked out.
See :hg:`help dates` for a list of formats valid for -d/--date.
"""
mygpg = newgpg(ui, **opts)
sigver = "0"
sigmessage = ""
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
if revs:
nodes = [repo.lookup(n) for n in revs]
else:
nodes = [node for node in repo.dirstate.parents()
if node != hgnode.nullid]
if len(nodes) > 1:
raise util.Abort(_('uncommitted merge - please provide a '
'specific revision'))
if not nodes:
nodes = [repo.changelog.tip()]
for n in nodes:
hexnode = hgnode.hex(n)
ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
hgnode.short(n)))
# build data
data = node2txt(repo, n, sigver)
sig = mygpg.sign(data)
if not sig:
raise util.Abort(_("error while signing"))
sig = binascii.b2a_base64(sig)
sig = sig.replace("\n", "")
sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
# write it
if opts['local']:
repo.opener.append("localsigs", sigmessage)
return
msigs = match.exact(repo.root, '', ['.hgsigs'])
s = repo.status(match=msigs, unknown=True, ignored=True)[:6]
if util.any(s) and not opts["force"]:
raise util.Abort(_("working copy of .hgsigs is changed "
"(please commit .hgsigs manually "
"or use --force)"))
sigsfile = repo.wfile(".hgsigs", "ab")
sigsfile.write(sigmessage)
sigsfile.close()
if '.hgsigs' not in repo.dirstate:
repo[None].add([".hgsigs"])
if opts["no_commit"]:
return
message = opts['message']
if not message:
# we don't translate commit messages
message = "\n".join(["Added signature for changeset %s"
% hgnode.short(n)
for n in nodes])
try:
repo.commit(message, opts['user'], opts['date'], match=msigs)
except ValueError, inst:
raise util.Abort(str(inst))
def node2txt(repo, node, ver):
"""map a manifest into some text"""
if ver == "0":
return "%s\n" % hgnode.hex(node)
else:
raise util.Abort(_("unknown signature version"))
| apache-2.0 |
danieldresser/cortex | test/IECore/ImageConvolveOpTest.py | 12 | 2389 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
from IECore import *
class ImageConvolveOpTest( unittest.TestCase ) :
def testConstruction( self ) :
op = ImageConvolveOp()
def testSimple( self ) :
img = Reader.create( "test/IECore/data/exrFiles/colorBarsWithAlpha.exr" ).read()
op = ImageConvolveOp()
result = op(
input = img,
)
expectedResult = Reader.create( "test/IECore/data/expectedResults/convolvedColorBarsWithAlpha.exr" ).read()
diffOp = ImageDiffOp()
diff = diffOp( imageA = result, imageB = expectedResult ).value
self.failIf( diff )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
AltSchool/django-allauth | allauth/socialaccount/providers/trello/provider.py | 3 | 1169 | from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
class TrelloAccount(ProviderAccount):
def get_profile_url(self):
return None
def get_avatar_url(self):
return None
class TrelloProvider(OAuthProvider):
id = 'trello'
name = 'Trello'
account_class = TrelloAccount
def get_default_scope(self):
return ['read']
def extract_uid(self, data):
return data['id']
def extract_common_fields(self, data):
return dict(
email=data.get('email'),
username=data.get('username'),
name=data.get('name'),
)
def get_auth_params(self, request, action):
data = super(TrelloProvider, self).get_auth_params(request, action)
app = self.get_app(request)
data['type'] = 'web_server'
data['name'] = app.name
data['scope'] = self.get_scope(request)
# define here for how long it will be, this can be configured on the
# social app
data['expiration'] = 'never'
return data
provider_classes = [TrelloProvider]
| mit |
ravindrapanda/tensorflow | tensorflow/contrib/kfac/examples/mlp_mnist_main.py | 21 | 1949 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train an MLP on MNIST using K-FAC.
See mlp.py for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mlp
FLAGS = None
def main(argv):
_ = argv
if FLAGS.use_estimator:
if FLAGS.num_towers != 1:
raise ValueError("Only 1 device supported in tf.estimator example.")
mlp.train_mnist_estimator(FLAGS.data_dir, num_epochs=200)
elif FLAGS.num_towers > 1:
mlp.train_mnist_multitower(
FLAGS.data_dir, num_epochs=200, num_towers=FLAGS.num_towers)
else:
mlp.train_mnist(FLAGS.data_dir, num_epochs=200)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/mnist",
help="Directory to store dataset in.")
parser.add_argument(
"--num_towers",
type=int,
default=1,
help="Number of CPUs to split minibatch across.")
parser.add_argument(
"--use_estimator",
action="store_true",
help="Use tf.estimator API to train.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
projectcalico/calico-neutron | neutron/plugins/bigswitch/extensions/routerrule.py | 4 | 4565 | # Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as nexception
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Router Rules Exceptions
class InvalidRouterRules(nexception.InvalidInput):
message = _("Invalid format for router rules: %(rule)s, %(reason)s")
class RulesExhausted(nexception.BadRequest):
message = _("Unable to complete rules update for %(router_id)s. "
"The number of rules exceeds the maximum %(quota)s.")
def convert_to_valid_router_rules(data):
"""
Validates and converts router rules to the appropriate data structure
Example argument = [{'source': 'any', 'destination': 'any',
'action':'deny'},
{'source': '1.1.1.1/32', 'destination': 'external',
'action':'permit',
'nexthops': ['1.1.1.254', '1.1.1.253']}
]
"""
V4ANY = '0.0.0.0/0'
CIDRALL = ['any', 'external']
if not isinstance(data, list):
emsg = _("Invalid data format for router rule: '%s'") % data
LOG.debug(emsg)
raise nexception.InvalidInput(error_message=emsg)
_validate_uniquerules(data)
rules = []
expected_keys = ['source', 'destination', 'action']
for rule in data:
rule['nexthops'] = rule.get('nexthops', [])
if not isinstance(rule['nexthops'], list):
rule['nexthops'] = rule['nexthops'].split('+')
src = V4ANY if rule['source'] in CIDRALL else rule['source']
dst = V4ANY if rule['destination'] in CIDRALL else rule['destination']
errors = [attr._verify_dict_keys(expected_keys, rule, False),
attr._validate_subnet(dst),
attr._validate_subnet(src),
_validate_nexthops(rule['nexthops']),
_validate_action(rule['action'])]
errors = [m for m in errors if m]
if errors:
LOG.debug(errors)
raise nexception.InvalidInput(error_message=errors)
rules.append(rule)
return rules
def _validate_nexthops(nexthops):
seen = []
for ip in nexthops:
msg = attr._validate_ip_address(ip)
if ip in seen:
msg = _("Duplicate nexthop in rule '%s'") % ip
seen.append(ip)
if msg:
return msg
def _validate_action(action):
if action not in ['permit', 'deny']:
return _("Action must be either permit or deny."
" '%s' was provided") % action
def _validate_uniquerules(rules):
pairs = []
for r in rules:
if 'source' not in r or 'destination' not in r:
continue
pairs.append((r['source'], r['destination']))
if len(set(pairs)) != len(pairs):
error = _("Duplicate router rules (src,dst) found '%s'") % pairs
LOG.debug(error)
raise nexception.InvalidInput(error_message=error)
class Routerrule(object):
@classmethod
def get_name(cls):
return "Neutron Router Rule"
@classmethod
def get_alias(cls):
return "router_rules"
@classmethod
def get_description(cls):
return "Router rule configuration for L3 router"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/routerrules/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-05-23T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
# Attribute Map
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {
'router_rules': {'allow_post': False, 'allow_put': True,
'convert_to': convert_to_valid_router_rules,
'is_visible': True,
'default': attr.ATTR_NOT_SPECIFIED},
}
}
| apache-2.0 |
bruinAlex/indoor-stomach | exercise_2/extweetwordcount/src/bolts/wordcount.py | 1 | 1599 | from __future__ import absolute_import, print_function, unicode_literals
from collections import Counter
from streamparse.bolt import Bolt
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
class WordCounter(Bolt):
def initialize(self, conf, ctx):
self.counts = Counter()
def process(self, tup):
word = tup.values[0]
# Write code to increment the word count in Postgres
# Use psycopg to interact with Postgres
# Database name: tcount
# Table name: tweetwordcount
# you need to create both the database and the table in advance.
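        # A minimal, assumed schema for that table (illustrative only; adjust
        # names and types to your actual setup):
        #   CREATE TABLE tweetwordcount (word TEXT PRIMARY KEY, count INTEGER);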
# Increment the local count
self.counts[word] += 1
self.emit([word, self.counts[word]])
uWord = word
uCount = self.counts[word]
# Connect to tcount database
conn = psycopg2.connect(database="tcount", user="postgres", password="pass", host="localhost", port="5432") #password may not be necessary
# Update the table if the word exists
cur = conn.cursor()
cur.execute("UPDATE tweetwordcount SET count=%s WHERE word=%s;", (uCount, uWord))
conn.commit()
# Insert the word and its count into the table if the word is not found
cur.execute("INSERT INTO tweetwordcount (word, count) SELECT %s, %s WHERE NOT EXISTS (SELECT 1 FROM tweetwordcount WHERE word=%s);", (uWord, uCount, uWord))
conn.commit()
cur.close()
conn.close()
# Log the count - just to see the topology running
self.log('%s: %d' % (word, self.counts[word]))
| mit |
t0in4/django | django/core/cache/backends/filebased.py | 428 | 5387 | "File-based cache backend"
import errno
import glob
import hashlib
import io
import os
import random
import tempfile
import time
import zlib
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.files.move import file_move_safe
from django.utils.encoding import force_bytes
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
class FileBasedCache(BaseCache):
cache_suffix = '.djcache'
def __init__(self, dir, params):
super(FileBasedCache, self).__init__(params)
self._dir = os.path.abspath(dir)
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version):
return False
self.set(key, value, timeout, version)
return True
def get(self, key, default=None, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
try:
with io.open(fname, 'rb') as f:
if not self._is_expired(f):
return pickle.loads(zlib.decompress(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
pass # Cache file was removed after the exists check
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
self._createdir() # Cache dir can be deleted at any time.
fname = self._key_to_file(key, version)
self._cull() # make some room if necessary
fd, tmp_path = tempfile.mkstemp(dir=self._dir)
renamed = False
try:
with io.open(fd, 'wb') as f:
expiry = self.get_backend_timeout(timeout)
f.write(pickle.dumps(expiry, -1))
f.write(zlib.compress(pickle.dumps(value), -1))
file_move_safe(tmp_path, fname, allow_overwrite=True)
renamed = True
finally:
if not renamed:
os.remove(tmp_path)
def delete(self, key, version=None):
self._delete(self._key_to_file(key, version))
def _delete(self, fname):
if not fname.startswith(self._dir) or not os.path.exists(fname):
return
try:
os.remove(fname)
except OSError as e:
# ENOENT can happen if the cache file is removed (by another
# process) after the os.path.exists check.
if e.errno != errno.ENOENT:
raise
def has_key(self, key, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
with io.open(fname, 'rb') as f:
return not self._is_expired(f)
return False
def _cull(self):
"""
Removes random cache entries if max_entries is reached at a ratio
of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means
that the entire cache will be purged.
"""
filelist = self._list_cache_files()
num_entries = len(filelist)
if num_entries < self._max_entries:
return # return early if no culling is required
if self._cull_frequency == 0:
return self.clear() # Clear the cache when CULL_FREQUENCY = 0
# Delete a random selection of entries
filelist = random.sample(filelist,
int(num_entries / self._cull_frequency))
for fname in filelist:
self._delete(fname)
def _createdir(self):
if not os.path.exists(self._dir):
try:
os.makedirs(self._dir, 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise EnvironmentError(
"Cache directory '%s' does not exist "
"and could not be created'" % self._dir)
def _key_to_file(self, key, version=None):
"""
Convert a key into a cache file path. Basically this is the
root cache path joined with the md5sum of the key and a suffix.
"""
key = self.make_key(key, version=version)
self.validate_key(key)
return os.path.join(self._dir, ''.join(
[hashlib.md5(force_bytes(key)).hexdigest(), self.cache_suffix]))
def clear(self):
"""
Remove all the cache files.
"""
if not os.path.exists(self._dir):
return
for fname in self._list_cache_files():
self._delete(fname)
def _is_expired(self, f):
"""
Takes an open cache file and determines if it has expired,
deletes the file if it is has passed its expiry time.
"""
exp = pickle.load(f)
if exp is not None and exp < time.time():
f.close() # On Windows a file has to be closed before deleting
self._delete(f.name)
return True
return False
def _list_cache_files(self):
"""
Get a list of paths to all the cache files. These are all the files
in the root cache dir that end on the cache_suffix.
"""
if not os.path.exists(self._dir):
return []
filelist = [os.path.join(self._dir, fname) for fname
in glob.glob1(self._dir, '*%s' % self.cache_suffix)]
return filelist
| bsd-3-clause |
KingxBanana/zulip | zerver/management/commands/send_password_reset_email.py | 8 | 4172 | from __future__ import absolute_import
import logging
from typing import Any, Dict, List, Optional, Text
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.core.mail import send_mail, BadHeaderError
from zerver.forms import PasswordResetForm
from zerver.models import UserProfile, get_user_profile_by_email, get_realm_by_string_id
from django.template import loader
from django.core.mail import EmailMultiAlternatives
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.contrib.auth.tokens import default_token_generator, PasswordResetTokenGenerator
class Command(BaseCommand):
help = """Send email to specified email address."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--to', metavar='<to>', type=str,
help="email of user to send the email")
parser.add_argument('--realm', metavar='<realm>', type=str,
help="realm to send the email to all users in")
parser.add_argument('--server', metavar='<server>', type=str,
help="If you specify 'YES' will send to everyone on server")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
if options["to"]:
users = [get_user_profile_by_email(options["to"])]
elif options["realm"]:
realm = get_realm_by_string_id(options["realm"])
users = UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False,
is_mirror_dummy=False)
elif options["server"] == "YES":
users = UserProfile.objects.filter(is_active=True, is_bot=False,
is_mirror_dummy=False)
else:
raise RuntimeError("Missing arguments")
self.send(users)
def send(self, users,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.txt',
use_https=True, token_generator=default_token_generator,
from_email=None, html_email_template_name=None):
# type: (List[UserProfile], str, str, bool, PasswordResetTokenGenerator, Optional[Text], Optional[str]) -> None
"""Sends one-use only links for resetting password to target users
"""
for user_profile in users:
context = {
'email': user_profile.email,
'domain': user_profile.realm.host,
'site_name': "zulipo",
'uid': urlsafe_base64_encode(force_bytes(user_profile.pk)),
'user': user_profile,
'token': token_generator.make_token(user_profile),
'protocol': 'https' if use_https else 'http',
}
logging.warning("Sending %s email to %s" % (email_template_name, user_profile.email,))
self.send_mail(subject_template_name, email_template_name,
context, from_email, user_profile.email,
html_email_template_name=html_email_template_name)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
# type: (str, str, Dict[str, Any], Text, Text, Optional[str]) -> None
"""
Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
| apache-2.0 |
thisispuneet/potato-blog | django/core/management/commands/makemessages.py | 154 | 16507 | import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
from django.core.management.base import CommandError, NoArgsCommand
from django.utils.text import get_text_list
pythonize_re = re.compile(r'(?:^|\n)\s*//')
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
def handle_extensions(extensions=('html',)):
"""
organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
for example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in a extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ','').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
# we don't want *.py files here because of the way non-*.py files
# are handled in make_messages() (they are copied to file.ext.py files to
# trick xgettext to parse them as Python files)
return set([x for x in ext_list if x != '.py'])
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
return p.communicate()
def walk(root, topdown=True, onerror=None, followlinks=False):
"""
A version of os.walk that can follow symlinks for Python < 2.6
"""
for dirpath, dirnames, filenames in os.walk(root, topdown, onerror):
yield (dirpath, dirnames, filenames)
if followlinks:
for d in dirnames:
p = os.path.join(dirpath, d)
if os.path.islink(p):
for link_dirpath, link_dirnames, link_filenames in walk(p):
yield (link_dirpath, link_dirnames, link_filenames)
def is_ignored(path, ignore_patterns):
"""
Helper function to check if the given path should be ignored or not.
"""
for pattern in ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def find_files(root, ignore_patterns, verbosity, symlinks=False):
"""
Helper function to get all files in the given root.
"""
all_files = []
for (dirpath, dirnames, filenames) in walk(".", followlinks=symlinks):
for f in filenames:
norm_filepath = os.path.normpath(os.path.join(dirpath, f))
if is_ignored(norm_filepath, ignore_patterns):
if verbosity > 1:
sys.stdout.write('ignoring file %s in %s\n' % (f, dirpath))
else:
all_files.extend([(dirpath, f)])
all_files.sort()
return all_files
def copy_plural_forms(msgs, locale, domain, verbosity):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
import django
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
m = plural_forms_re.search(open(django_po, 'rU').read())
if m:
if verbosity > 1:
sys.stderr.write("copying plural forms: %s\n" % m.group('value'))
lines = []
seen = False
for line in msgs.split('\n'):
if not line and not seen:
line = '%s\n' % m.group('value')
seen = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
def make_messages(locale=None, domain='django', verbosity='1', all=False,
extensions=None, symlinks=False, ignore_patterns=[], no_wrap=False,
no_obsolete=False):
"""
Uses the locale directory from the Django SVN tree or an application/
    project to process all translatable strings.
"""
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
invoked_for_django = False
if os.path.isdir(os.path.join('conf', 'locale')):
localedir = os.path.abspath(os.path.join('conf', 'locale'))
invoked_for_django = True
# Ignoring all contrib apps
ignore_patterns += ['contrib/*']
elif os.path.isdir('locale'):
localedir = os.path.abspath('locale')
else:
raise CommandError("This script should be run from the Django SVN tree or your project or app tree. If you did indeed run it from the SVN checkout or your project or application, maybe you are just missing the conf/locale (in the django tree) or locale (for project and application) directory? It is not created automatically, you have to create it by hand if you want to enable i18n for your project or application.")
if domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")
if (locale is None and not all) or domain is None:
message = "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1])
raise CommandError(message)
# We require gettext version 0.15 or newer.
output = _popen('xgettext --version')[0]
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU gettext 0.15 or newer. You are using version %s, please upgrade your gettext toolset." % match.group())
languages = []
if locale is not None:
languages.append(locale)
elif all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
languages = [os.path.basename(l) for l in locale_dirs]
wrap = no_wrap and '--no-wrap' or ''
for locale in languages:
if verbosity > 0:
print "processing language", locale
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % domain)
potfile = os.path.join(basedir, '%s.pot' % domain)
if os.path.exists(potfile):
os.unlink(potfile)
for dirpath, file in find_files(".", ignore_patterns, verbosity, symlinks=symlinks):
file_base, file_ext = os.path.splitext(file)
if domain == 'djangojs' and file_ext in extensions:
if verbosity > 1:
sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
src = open(os.path.join(dirpath, file), "rU").read()
src = pythonize_re.sub('\n#', src)
thefile = '%s.py' % file
f = open(os.path.join(dirpath, thefile), "w")
try:
f.write(src)
finally:
f.close()
cmd = (
'xgettext -d %s -L Perl %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
'--from-code UTF-8 --add-comments=Translators -o - "%s"' % (
domain, wrap, os.path.join(dirpath, thefile)
)
)
msgs, errors = _popen(cmd)
if errors:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(file, errors))
if msgs:
old = '#: ' + os.path.join(dirpath, thefile)[2:]
new = '#: ' + os.path.join(dirpath, file)[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
f = open(potfile, 'ab')
try:
f.write(msgs)
finally:
f.close()
os.unlink(os.path.join(dirpath, thefile))
elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
thefile = file
orig_file = os.path.join(dirpath, file)
if file_ext in extensions:
src = open(orig_file, "rU").read()
thefile = '%s.py' % file
f = open(os.path.join(dirpath, thefile), "w")
try:
f.write(templatize(src, orig_file[2:]))
finally:
f.close()
if verbosity > 1:
sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
cmd = (
'xgettext -d %s -L Python %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=ugettext_noop --keyword=ugettext_lazy '
'--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
'--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
'--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
'--add-comments=Translators -o - "%s"' % (
domain, wrap, os.path.join(dirpath, thefile))
)
msgs, errors = _popen(cmd)
if errors:
if thefile != file:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(file, errors))
if msgs:
if thefile != file:
old = '#: ' + os.path.join(dirpath, thefile)[2:]
new = '#: ' + orig_file[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
f = open(potfile, 'ab')
try:
f.write(msgs)
finally:
f.close()
if thefile != file:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
msgs, errors = _popen('msguniq %s --to-code=utf-8 "%s"' %
(wrap, potfile))
if errors:
os.unlink(potfile)
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
if os.path.exists(pofile):
f = open(potfile, 'w')
try:
f.write(msgs)
finally:
f.close()
msgs, errors = _popen('msgmerge %s -q "%s" "%s"' %
(wrap, pofile, potfile))
if errors:
os.unlink(potfile)
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif not invoked_for_django:
msgs = copy_plural_forms(msgs, locale, domain, verbosity)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % domain, "")
f = open(pofile, 'wb')
try:
f.write(msgs)
finally:
f.close()
os.unlink(potfile)
if no_obsolete:
msgs, errors = _popen('msgattrib %s -o "%s" --no-obsolete "%s"' %
(wrap, pofile, pofile))
if errors:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale',
help='Creates or updates the message files for the given locale (e.g. pt_BR).'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: ".html", separate multiple extensions with commas, or use -e multiple times)',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines"),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings"),
)
help = ( "Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
requires_model_validation = False
can_import_settings = False
def handle_noargs(self, *args, **options):
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
no_wrap = options.get('no_wrap')
no_obsolete = options.get('no_obsolete')
if domain == 'djangojs':
extensions = handle_extensions(extensions or ['js'])
else:
extensions = handle_extensions(extensions or ['html'])
if verbosity > 1:
sys.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(extensions), 'and'))
make_messages(locale, domain, verbosity, process_all, extensions, symlinks, ignore_patterns, no_wrap, no_obsolete)
| bsd-3-clause |
Hao-Liu/tp-libvirt | libvirt/tests/src/nwfilter/nwfilter_edit_uuid.py | 7 | 2275 | import logging
from autotest.client.shared import error
from virttest import virsh
from virttest import libvirt_xml
from virttest import aexpect
from virttest import remote
from provider import libvirt_version
def run(test, params, env):
"""
Test virsh nwfilter-edit with uuid.
1) Prepare parameters.
2) Run nwfilter-edit command.
3) Check result.
4) Clean env
"""
# Prepare parameters
filter_name = params.get("edit_filter_name", "")
status_error = params.get("status_error", "no")
new_uuid = "11111111-1111-1111-1111-111111111111"
edit_cmd = ":2s/<uuid>.*$/<uuid>%s<\/uuid>/" % new_uuid
# Since commit 46a811d, the logic changed for not allow update filter
# uuid, so decide status_error with libvirt version.
if libvirt_version.version_compare(1, 2, 7):
status_error = True
else:
status_error = False
# Backup filter xml
new_filter = libvirt_xml.NwfilterXML()
filterxml = new_filter.new_from_filter_dumpxml(filter_name)
logging.debug("the filter xml is: %s" % filterxml.xmltreefile)
try:
# Run command
session = aexpect.ShellSession("sudo -s")
try:
session.sendline("virsh nwfilter-edit %s" % filter_name)
session.sendline(edit_cmd)
# Press ESC
session.send('\x1b')
# Save and quit
session.send('ZZ')
remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
session.close()
if not status_error:
logging.info("Succeed to do nwfilter edit")
else:
raise error.TestFail("edit uuid should fail but got succeed.")
except (aexpect.ShellError, aexpect.ExpectError, remote.LoginTimeoutError), details:
log = session.get_output()
session.close()
if "Try again? [y,n,f,?]:" in log and status_error:
logging.debug("edit uuid failed as expected.")
else:
raise error.TestFail("Failed to do nwfilter-edit: %s\n%s"
% (details, log))
finally:
# Clean env
virsh.nwfilter_undefine(filter_name, debug=True)
virsh.nwfilter_define(filterxml.xml, debug=True)
| gpl-2.0 |
sidmitra/django_nonrel_testapp | django/contrib/localflavor/pt/forms.py | 309 | 1561 | """
PT-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(\d{9}|(00|\+)\d*)$')
class PTZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PTZipCodeField, self).__init__(r'^(\d{4}-\d{3}|\d{7})$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
cleaned = super(PTZipCodeField, self).clean(value)
if len(cleaned) == 7:
return u'%s-%s' % (cleaned[:4],cleaned[4:])
else:
return cleaned
class PTPhoneNumberField(Field):
"""
Validate local Portuguese phone number (including international ones)
It should have 9 digits (may include spaces) or start by 00 or + (international)
"""
default_error_messages = {
'invalid': _('Phone numbers must have 9 digits, or start by + or 00.'),
}
def clean(self, value):
super(PTPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\.|\s)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s' % value
raise ValidationError(self.error_messages['invalid'])
| bsd-3-clause |
KristopherJH/ENGI7854US | Despeckle.py | 1 | 10592 | import cv2
import matplotlib.pyplot as plt
import numpy as np
import UltraLibrary as ul
from FrameType import f_type
def hist_despeckle(img,goodImg):
    dsimg, homog = quickHomog(img)
return ul.filtered_match(img,dsimg,goodImg)
def detection(im):
detector = cv2.SimpleBlobDetector_create()
# Detect blobs.
keypoints = detector.detect(im)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show keypoints
cv2.imshow("Keypoints", im_with_keypoints)
def hist_first(original, goodImg,*args, **kwargs):
img = ul.global_histogram(original,goodImg)
systolic = despeckle_thresh(original.astype(np.uint8), *args, **kwargs)
return systolic
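# despeckle_thresh is a rough systole/diastole classifier: the frame is
# despeckled with quickHomog, Canny edge pixels are counted inside a fixed crop
# of the homogeneity mask, and the count is compared against the mean of the
# last few frames; frames with fewer edge pixels than that running mean are
# labelled systolic, and state changes within 10 frames of the previous change
# are suppressed as noise.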
def despeckle_thresh(img, countarray,diffarray,index, systracker):
dsimg, homog = quickHomog(img)
homog2 = homog[80:200,200:400]
#detection(homog)
#thresholding(dsimg,homog2)
#ret, threshUF = cv2.threshold(img,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#cv2.imshow('Unfiltered', threshUF)
#kernel = np.ones((3,3),np.uint8)
#opening= cv2.morphologyEx(homog2,cv2.MORPH_OPEN,kernel, iterations = 1)
edges = cv2.Canny(homog2, threshold1 = 50, threshold2 = 100)
cv2.imshow('edges', edges)
#cv2.imshow('dialtion', opening)
countarray[index[0]] = np.sum(edges)/255
if index[0] > 5:
diffarray[index[0]] = np.mean(countarray[index[0]-5:index[0]])
else:
diffarray[index[0]] = np.mean(countarray[:index[0]])
systolic = countarray[index[0]] < diffarray[index[0]]
if systolic != systracker[0]:
if index[0] - systracker[1] <= 10:
systolic = not systolic
else:
systracker[0] = systolic
systracker[1] = index[0]
#print(systolic, countarray[index[0]], diffarray[index[0]])
    image = np.zeros((120, 200, 3), np.uint8)
if systolic:
image[:,:,1] = 255
else:
image[:,:,2] = 255
index[0] = index[0] + 1
cv2.imshow('systolic',image)
#plt.cla()
#plt.plot(counts[:index[0]-1])
#plt.title(video)
#plt.pause(0.001)
return systolic
def thresholding(img,other):
# otsu thresh
ret, thresh = cv2.threshold(img,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#thresh = cv2.adaptiveThreshold(img, 1, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 55, -3)
cv2.imshow('threshold',thresh)
thresh = other
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 5)
#closing = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel, iterations = 5)
#cv2.imshow('opening',opening)
# sure background area
#sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
#dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
#ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
#sure_fg = np.uint8(sure_fg)
#unknown = cv2.subtract(sure_bg,sure_fg)
#cv2.imshow('bg',sure_bg)
#cv2.imshow('fg',sure_fg)
# cv2.imshow('unknown',unknown)
#cv2.imshow('dist',dist_transform)
return thresh
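# quickHomog is a fast homogeneity-based despeckle pass: local mean and variance
# are computed over a wSize x wSize window, and pixels whose homogeneity value
# h = var^2 / mean falls below a global threshold (derived from the image-wide
# averages and hScaler), or whose local mean is near zero, are treated as
# homogeneous and take the Gaussian-blurred value; all other pixels take the
# median-filtered value.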
def quickHomog(img):
hScaler = 2
wSize = 7
hMat = np.zeros(img.shape)
mean = cv2.blur(img.astype(np.float64),(wSize,wSize))
#cv2.imshow('mean', mean)
moment2 = cv2.blur(np.multiply(img,img).astype(np.float64), (wSize,wSize))
dev = moment2-np.multiply(mean,mean)
median = cv2.medianBlur(img,wSize)
gaussian = cv2.GaussianBlur(img,(wSize,wSize),sigmaX=1)
mean_mean = np.mean(mean)
mean_dev = np.mean(dev)
#
hthresh = np.ones(img.shape)*mean_dev**2/mean_mean/hScaler
hVal = np.divide( np.multiply(dev,dev), mean)
hMat = np.less_equal(hVal,hthresh)
zeromean = np.less(mean,3)
#hMat = np.multiply(hMat,np.logical_not(zeromean))
hMat = np.logical_or(hMat,zeromean)
gaussians = np.multiply(hMat,gaussian)
medians = np.multiply(np.logical_not(hMat),median)
newimg = gaussians+medians
cv2.imshow('homogeny',hMat.astype(np.uint8)*255)
return newimg, hMat.astype(np.uint8)*255
def despeckle(img):
winSize = 7 #nxn window for filtering
    halfWin = winSize // 2
#typical h for homogeneous region is 1.6 at window size 7x7
# can play with this value to determine optimal threshold
highThresh = 1
sigmaX = 1
sigmaY = 1
pad = halfWin + 1 #how many pixels to pad the image
hMat = np.zeros(img.shape)
img = cv2.copyMakeBorder(img,pad,pad,pad,pad,cv2.BORDER_REFLECT)
size = img.shape
newimg = np.zeros(size)
hMat = np.zeros(size)
#generating gaussian kernel
kernelX = cv2.getGaussianKernel(winSize, sigmaX)
kernelY = cv2.getGaussianKernel(winSize, sigmaY)
Gaussian = np.matmul(kernelX, np.transpose(kernelY))
#loop through all original pixels
for i in range(pad+1,size[0]-pad+1):
for j in range(pad+1,size[1]-pad+1):
W = img[i-halfWin:i+halfWin+1,j-halfWin:j+halfWin+1]
mean = np.mean(W)
vari = np.var(W)
if mean == 0:
h = 0
else:
h = vari/mean
if h> highThresh:
# newimg[i,j] = np.median(W)
pass
else:
#newimg[i,j] = np.sum(np.multiply(Gaussian,W))
hMat[i,j] = 1
#print(i,j, newimg[i,j])
newimg = newimg.astype(np.uint8)
newimg = newimg[pad+1:size[0]-pad+1, pad+1:size[1]-pad+1]
cv2.imshow('despeckled', newimg)
cv2.imshow('speckled', img)
#plt.imshow(newimg, cmap='gray')
#plt.xticks([])
#plt.yticks([])
#plt.show()
cv2.imshow('homogeny', hMat)
#gimg = cv2.GaussianBlur(img, (7,7),sigmaX = 1);
#cv2.imshow('gaussoan', gimg)
#mimg = cv2.medianBlur(img, 7);
#cv2.imshow('median', mimg)
cv2.waitKey(33)
return newimg.astype(np.uint8),hMat
if __name__ == "__main__":
goodImg = cv2.imread('GoodImages\\3-A.png')
vids = ['Videos/1-A.mp4', 'Videos/1-B.mp4', 'Videos/2-A.mp4', 'Videos/2-B.mp4',
'Videos/3-A.mp4', 'Videos/3-B.mp4', 'Videos/4-A.mp4', 'Videos/4-B.mp4',
'Videos/5-A.mp4', 'Videos/5-B.mp4', 'Videos/Varying.mp4']
goodImg = cv2.imread('GoodImages\\3-A.PNG',0)
vids =['Videos/Varying.mp4']
for video in vids:
cap = cv2.VideoCapture(video)
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
counts = np.zeros(length)
diffs = np.zeros(length)
index = [0]
#plt.ion()
systracker = np.zeros(2)
ul.runVideo(video, hist_first, goodImg, counts,diffs,index, systracker)
line_c, = plt.plot(counts, label = 'Pixel Counts')
mean = diffs
dev = np.var(counts)**0.5
hthresh = mean+dev/2
lthresh = mean-dev/2
line_m, = plt.plot(mean, label = 'Systoles Threshold')
systolic = np.less(counts,mean).astype(np.uint8)
systolic = systolic*(counts.max()-counts.min()) + counts.min()
#plt.plot(systolic)
#plt.plot(hthresh)
#plt.plot(lthresh)
#plt.plot(diffs)
plt.title('White Pixel Counts with Prior Histogram Equalization')
plt.xlabel('Frame #')
plt.ylabel('Number of White Pixels')
plt.legend(handles = [line_c, line_m])
plt.show()
"""
# otsu thresh
#img = cv2.imread('GoodImages\\5-A.png')
#img = ul.stripFrame(img)
img = cv2.imread('Segmentation\\despeckled_3-A_h1.7.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow('threshold',thresh)
cv2.waitKey(0)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
cv2.imshow('bg',sure_bg)
cv2.imshow('fg',sure_fg)
cv2.imshow('unknown',unknown)
cv2.imshow('dist',dist_transform)
cv2.waitKey(0)
"""
"""
#find and save despeckled image
speckImg = cv2.imread('GoodImages\\5-A.PNG',0)
speckImg = ul.stripFrame(speckImg)
despeckImg, hmat= quickieHomo(speckImg)
hmat = hmat*255
cv2.imshow('homogeny', hmat)
cv2.imshow('orig', speckImg)
cv2.imshow('despeck', despeckImg)
cv2.waitKey(0)
#cv2.imwrite('Segmentation\\despeckled_3-A_h1.png', despeckImg)
"""
"""
cap = cv2.VideoCapture('Videos\\4-A.mp4')
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out1 = cv2.VideoWriter('Videos\\despeckle_4-A.avi',fourcc, 5, (640,480))
out2 = cv2.VideoWriter('Videos\\homo_4-A.avi',fourcc, 5, (640,480))
ret = True
i = 1
while i <= 5:
ret,frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
print(i)
i+=1
if ret == True:
frame = ul.stripFrame(frame)
newframe,hFrame = despeckle(frame)
out1.write(newframe)
out2.write(newframe)
else:
break
cap.release()
out2.release()
out1.release()
cv2.destroyAllWindows()
"""
"""
h0 =0
w0 = 0
goodImg = ul.stripFrame(cv2.imread('GoodImages\\3-A.png',0))
img = ul.stripFrame(cv2.imread('GoodImages\\5-A.png',0))
im2 = ul.stripFrame(cv2.imread('GoodImages\\5-A.png',0))
size = img.shape
img = cv2.GaussianBlur(img, (3,3),sigmaX = 1);
cv2.imshow('median', img)
cv2.waitKey(0)
img = ul.global_histogram(img,goodImg)
#img = cv2.copyMakeBorder(img,50,50,50,50,cv2.BORDER_REFLECT)
size = img.shape
cv2.imshow('hist_median', img)
cv2.waitKey(0)
im2 = ul.global_histogram(im2,goodImg)
cv2.imshow('no_median', im2)
cv2.waitKey(0)
"""
"""
for i in range(51,size[0]-50):
for j in range(51,size[1]-50):
homog = False
rSize = 11
while not homog:
W = img[i-rSize/2:i+rSize/2+1,j-rSize/2:j+rSize/2+1]
mean = np.sum(W)
var = np.var(W)
hij =var*var/mean
if hij < h0:
homog = True
"""
| gpl-3.0 |
landism/pants | tests/python/pants_test/engine/test_parsers.py | 4 | 10280 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import unittest
from textwrap import dedent
from pants.engine import parser
from pants.engine.objects import Resolvable
from pants_test.engine.examples import parsers
# A duck-typed Serializable with an `==` suitable for ease of testing.
class Bob(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
def _asdict(self):
return self._kwargs
def _key(self):
return {k: v for k, v in self._kwargs.items() if k != 'type_alias'}
def __eq__(self, other):
return isinstance(other, Bob) and self._key() == other._key()
class EmptyTable(parser.SymbolTable):
@classmethod
def table(cls):
return {}
class TestTable(parser.SymbolTable):
@classmethod
def table(cls):
return {'bob': Bob}
class TestTable2(parser.SymbolTable):
@classmethod
def table(cls):
return {'nancy': Bob}
def parse(parser, document, symbol_table_cls, **args):
return parser.parse('/dev/null', document, symbol_table_cls, **args)
class JsonParserTest(unittest.TestCase):
def parse(self, document, symbol_table_cls=None, **kwargs):
symbol_table_cls = symbol_table_cls or EmptyTable
return parse(parsers.JsonParser, document, symbol_table_cls, **kwargs)
def round_trip(self, obj, symbol_table_cls=None):
document = parsers.encode_json(obj, inline=True)
return self.parse(document, symbol_table_cls=symbol_table_cls)
def test_comments(self):
document = dedent("""
# Top level comment.
{
# Nested comment
"hobbies": [1, 2, 3]
}
""")
results = self.parse(document)
self.assertEqual(1, len(results))
self.assertEqual([dict(hobbies=[1, 2, 3])], self.round_trip(results[0]))
def test_single(self):
document = dedent("""
# A simple example with a single Bob.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"hobbies": [1, 2, 3]
}
""")
results = self.parse(document)
self.assertEqual(1, len(results))
self.assertEqual([Bob(hobbies=[1, 2, 3])], self.round_trip(results[0]))
self.assertEqual('pants_test.engine.test_parsers.Bob', results[0]._asdict()['type_alias'])
def test_symbol_table(self):
document = dedent("""
    # A simple example with a single Bob.
{
"type_alias": "bob",
"hobbies": [1, 2, 3]
}
""")
results = self.parse(document, symbol_table_cls=TestTable)
self.assertEqual(1, len(results))
self.assertEqual([Bob(hobbies=[1, 2, 3])],
self.round_trip(results[0], symbol_table_cls=TestTable))
self.assertEqual('bob', results[0]._asdict()['type_alias'])
def test_nested_single(self):
document = dedent("""
# An example with nested Bobs.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"uncle": {
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42
},
"hobbies": [1, 2, 3]
}
""")
results = self.parse(document)
self.assertEqual(1, len(results))
self.assertEqual([Bob(uncle=Bob(age=42), hobbies=[1, 2, 3])], self.round_trip(results[0]))
def test_nested_deep(self):
document = dedent("""
# An example with deeply nested Bobs.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"configs": [
{
"mappings": {
"uncle": {
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42
}
}
}
]
}
""")
results = self.parse(document)
self.assertEqual(1, len(results))
self.assertEqual([Bob(configs=[dict(mappings=dict(uncle=Bob(age=42)))])],
self.round_trip(results[0]))
def test_nested_many(self):
document = dedent("""
# An example with many nested Bobs.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"cousins": [
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"name": "Jake",
"age": 42
},
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"name": "Jane",
"age": 37
}
]
}
""")
results = self.parse(document)
self.assertEqual(1, len(results))
self.assertEqual([Bob(cousins=[Bob(name='Jake', age=42), Bob(name='Jane', age=37)])],
self.round_trip(results[0]))
def test_multiple(self):
document = dedent("""
# An example with several Bobs.
# One with hobbies.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"hobbies": [1, 2, 3]
}
# Another that is aged.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42
}
""")
results = self.parse(document)
self.assertEqual([Bob(hobbies=[1, 2, 3]), Bob(age=42)], results)
def test_tricky_spacing(self):
document = dedent("""
# An example with several Bobs.
# One with hobbies.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
# And internal comment and blank lines.
"hobbies": [1, 2, 3]} {
# This comment is inside an empty object that started on the prior line!
}
# Another that is aged.
{"type_alias": "pants_test.engine.test_parsers.Bob","age": 42}
""").strip()
results = self.parse(document)
self.assertEqual([Bob(hobbies=[1, 2, 3]), {}, Bob(age=42)], results)
def test_error_presentation(self):
document = dedent("""
# An example with several Bobs.
# One with hobbies.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
# And internal comment and blank lines.
"hobbies": [1, 2, 3]} {
# This comment is inside an empty object that started on the prior line!
}
# Another that is imaginary aged.
{
"type_alias": "pants_test.engine.test_parsers.Bob",
"age": 42i,
"four": 1,
"five": 1,
"six": 1,
"seven": 1,
"eight": 1,
"nine": 1
}
""").strip()
filepath = '/dev/null'
with self.assertRaises(parser.ParseError) as exc:
parsers.JsonParser.parse(filepath, document, symbol_table_cls=EmptyTable)
# Strip trailing whitespace from the message since our expected literal below will have
# trailing ws stripped via editors and code reviews calling for it.
actual_lines = [line.rstrip() for line in str(exc.exception).splitlines()]
# This message from the json stdlib varies between python releases, so fuzz the match a bit.
self.assertRegexpMatches(actual_lines[0],
r'Expecting (?:,|\',\'|",") delimiter: line 3 column 12 \(char 67\)')
self.assertEqual(dedent("""
In document at {filepath}:
# An example with several Bobs.
# One with hobbies.
{{
"type_alias": "pants_test.engine.test_parsers.Bob",
# And internal comment and blank lines.
"hobbies": [1, 2, 3]}} {{
# This comment is inside an empty object that started on the prior line!
}}
# Another that is imaginary aged.
1: {{
2: "type_alias": "pants_test.engine.test_parsers.Bob",
3: "age": 42i,
4: "four": 1,
5: "five": 1,
6: "six": 1,
7: "seven": 1,
8: "eight": 1,
9: "nine": 1
10: }}
""".format(filepath=filepath)).strip(), '\n'.join(actual_lines[1:]))
class JsonEncoderTest(unittest.TestCase):
def setUp(self):
bill = Bob(name='bill')
class SimpleResolvable(Resolvable):
@property
def address(self):
return '::an opaque address::'
def resolve(self):
return bill
resolvable_bill = SimpleResolvable()
self.bob = Bob(name='bob', relative=resolvable_bill, friend=bill)
def test_shallow_encoding(self):
expected_json = dedent("""
{
"name": "bob",
"type_alias": "pants_test.engine.test_parsers.Bob",
"friend": {
"name": "bill",
"type_alias": "pants_test.engine.test_parsers.Bob"
},
"relative": "::an opaque address::"
}
""").strip()
self.assertEqual(json.dumps(json.loads(expected_json)),
parsers.encode_json(self.bob, inline=False))
def test_inlined_encoding(self):
expected_json = dedent("""
{
"name": "bob",
"type_alias": "pants_test.engine.test_parsers.Bob",
"friend": {
"name": "bill",
"type_alias": "pants_test.engine.test_parsers.Bob"
},
"relative": {
"name": "bill",
"type_alias": "pants_test.engine.test_parsers.Bob"
}
}
""").strip()
self.assertEqual(json.dumps(json.loads(expected_json)),
parsers.encode_json(self.bob, inline=True))
class PythonAssignmentsParserTest(unittest.TestCase):
def test_no_symbol_table(self):
document = dedent("""
from pants_test.engine.test_parsers import Bob
nancy = Bob(
hobbies=[1, 2, 3]
)
""")
results = parse(parsers.PythonAssignmentsParser, document, symbol_table_cls=EmptyTable)
self.assertEqual([Bob(name='nancy', hobbies=[1, 2, 3])], results)
# No symbol table was used so no `type_alias` plumbing can be expected.
self.assertNotIn('type_alias', results[0]._asdict())
def test_symbol_table(self):
document = dedent("""
bill = nancy(
hobbies=[1, 2, 3]
)
""")
results = parse(parsers.PythonAssignmentsParser, document, symbol_table_cls=TestTable2)
self.assertEqual([Bob(name='bill', hobbies=[1, 2, 3])], results)
self.assertEqual('nancy', results[0]._asdict()['type_alias'])
class PythonCallbacksParserTest(unittest.TestCase):
def test(self):
document = dedent("""
nancy(
name='bill',
hobbies=[1, 2, 3]
)
""")
results = parse(parsers.PythonCallbacksParser, document, symbol_table_cls=TestTable2)
self.assertEqual([Bob(name='bill', hobbies=[1, 2, 3])], results)
self.assertEqual('nancy', results[0]._asdict()['type_alias'])
| apache-2.0 |
Yellowen/daarmaan | daarmaan/server/views/index.py | 1 | 12020 | # -----------------------------------------------------------------------------
# Daarmaan - Single Sign On Service for Yellowen
# Copyright (C) 2012 Yellowen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from django.shortcuts import render_to_response as rr
from django.shortcuts import redirect
from django.template import RequestContext
from django.contrib.auth import authenticate, login
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.conf.urls import patterns, url
from django.conf import settings
from daarmaan.server.forms import PreRegistrationForm, NewUserForm, LoginForm
from daarmaan.server.models import VerificationCode
class IndexPage(object):
"""
Daarmaan index page class.
"""
template = "index.html"
register_template = "register.html"
new_user_form_template = "new_user_form.html"
@property
def urls(self):
"""
First Page url patterns.
"""
urlpatterns = patterns(
'',
url(r"^$", self.index,
name="home"),
url(r"^register/$", self.pre_register,
name="home"),
url(r"^verificate/([A-Fa-f0-9]{40})/$", self.verificate,
name="verificate"),
url(r"^registration/done/$", self.registration_done,
name="registration-done"),
)
return urlpatterns
def index(self, request):
"""
Index view.
"""
if request.user.is_authenticated():
next_ = request.GET.get("next", None)
if not next_:
return HttpResponseRedirect(reverse('dashboard-index'))
if request.method == "POST":
return self.login(request)
else:
form = LoginForm()
next_url = request.GET.get("next", "")
return rr(self.template,
{"form": form,
"next": next_url},
context_instance=RequestContext(request))
def login(self, request):
"""
Login view that only accept a POST request.
"""
next_url = request.POST.get("next", None)
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
remember = form.cleaned_data.get("remember_me", False)
next_url = form.cleaned_data.get("next_", None)
# Authenticate the user
user = authenticate(username=username,
password=password)
if user is not None:
if user.is_active:
login(request, user)
self._setup_session(request)
if next_url:
import urllib
return HttpResponseRedirect(
urllib.unquote_plus(next_url)
)
return redirect(reverse("dashboard-index", args=[]))
else:
return rr(self.template,
{"form": form,
"msgclass": "text-error",
"next": next_url,
"msg": _("Your account is disabled.")},
context_instance=RequestContext(request))
else:
return rr(self.template,
{"form": form,
"msgclass": "text-error",
"next": next_url,
"msg": _("Username or Password is invalid.")},
context_instance=RequestContext(request))
else:
return rr(self.template,
{"form": form,
"next": next_url},
context_instance=RequestContext(request))
def pre_register(self, request):
"""
Handle the registeration request.
"""
from django.contrib.auth.models import User
from django.db import IntegrityError
if request.method == "POST":
form = PreRegistrationForm(request.POST)
msg = None
klass = ""
if form.is_valid():
# In case of valid information from user.
email = form.cleaned_data["email"]
username = form.cleaned_data["username"]
# Check for email exists
emails_count = User.objects.filter(email=email).count()
if emails_count:
failed = True
msg = _("This email has been registered before.")
klass = "text-error"
else:
try:
# Create and save an inactive user
user = User(username=username,
email=email)
                        user.is_active = False  # Django's User field is is_active
user.save()
if settings.EMAIL_VERIFICATION:
# Generate and send a verification code to user
# only if EMAIL_VERIFICATION was set
verif_code = VerificationCode.generate(user)
verification_link = reverse("verificate",
args=[verif_code])
self.send_verification_mail(user,
verification_link)
                            msg = _("A verification mail has been sent to your e-mail address.")
else:
                            msg = _("Your request has been submitted, thanks for your interest.")
klass = "text-success"
form = PreRegistrationForm()
except IntegrityError:
                        # In case the username already exists
msg = _("User already exists.")
klass = "text-error"
return rr(self.register_template,
{"form": form,
"msg": msg,
"msgclass": klass},
context_instance=RequestContext(request))
else:
form = PreRegistrationForm()
return rr(self.register_template,
{"form": form},
context_instance=RequestContext(request))
def _setup_session(self, request):
"""
Insert all needed values into user session.
"""
# TODO: Do we need to set the user services to his session.
return
services = request.user.get_profile().services.all()
services_id = [i.id for i in services]
request.session["services"] = services_id
def verificate(self, request, verification_code):
"""
        This view is responsible for verifying the user's mail address
        from the given verification code and redirecting to the basic
information form view.
"""
        # Look up the given verification code in the VerificationCode model
        # and check whether a valid matching code exists.
try:
verified_code = VerificationCode.objects.get(
code=verification_code)
except VerificationCode.DoesNotExist:
raise Http404()
        # If the verified_code is valid (e.g. issued within the past 48
        # hours), the new user form will allow the user to finalize his/her
        # registration process.
if verified_code.is_valid():
form = NewUserForm(initial={
"verification_code": verified_code.code})
form.action = reverse("registration-done", args=[])
return rr(self.new_user_form_template,
{"form": form,
"user": verified_code.user},
context_instance=RequestContext(request))
else:
raise Http404()
def send_verification_mail(self, user, verification_link):
"""
Send the verification link to the user.
"""
from django.core.mail import send_mail
msg = verification_link
send_mail('[Yellowen] Verification', msg, settings.EMAIL,
[user.email], fail_silently=False)
def registration_done(self, request):
if request.method == "POST":
form = NewUserForm(request.POST)
try:
verified_code = VerificationCode.objects.get(
code = request.POST.get("verification_code", ""))
except VerificationCode.DoesNotExist:
from django.http import HttpResponseForbidden
return HttpResponseForbidden()
if form.is_valid():
pass1 = form.cleaned_data["password1"]
pass2 = form.cleaned_data["password2"]
fname = form.cleaned_data["first_name"]
lname = form.cleaned_data["last_name"]
if pass1 != pass2:
form._errors = {
"password1": _("Two password fields did not match."),
"password2": _("Two password fields did not match.")}
msg = _("Two password fields did not match.")
klass = "text-error"
elif len(pass1) < 6:
                    form._errors = {
                        "password1": _("Password should be more than 6 characters long.")}
                    msg = _("Password should be more than 6 characters long.")
klass = "text-error"
elif len(pass1) > 40:
                    form._errors = {
                        "password1": _("Password should be less than 40 characters long.")}
                    msg = _("Password should be less than 40 characters long.")
klass = "text-error"
else:
user = verified_code.user
user.set_password(pass1)
user.first_name = fname
user.last_name = lname
                    user.is_active = True  # Django's User field is is_active
user.save()
# Clean up all the expired codes and currently used one
verified_code.delete()
VerificationCode.cleanup()
# Login the user
user = authenticate(username=user.username,
password=pass1)
login(request, user)
return redirect(reverse(
"dashboard-index",
args=[]))
return rr(self.new_user_form_template,
{"form": form,
"user": verified_code.user,
"msg": msg,
"msgclass": klass},
context_instance=RequestContext(request))
else:
raise Http404()
index_page = IndexPage()
| gpl-2.0 |
rashmi815/incubator-madlib | src/madpack/yaml/scanner.py | 126 | 52589 |
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from error import MarkedYAMLError
from tokens import *
class ScannerError(MarkedYAMLError):
pass
class SimpleKey(object):
# See below simple keys treatment.
def __init__(self, token_number, required, index, line, column, mark):
self.token_number = token_number
self.required = required
self.index = index
self.line = line
self.column = column
self.mark = mark
class Scanner(object):
def __init__(self):
"""Initialize the scanner."""
# It is assumed that Scanner and Reader will have a common descendant.
        # Reader does the dirty work of checking for BOM and converting the
# input data to Unicode. It also adds NUL to the end.
#
# Reader supports the following methods
# self.peek(i=0) # peek the next i-th character
# self.prefix(l=1) # peek the next l characters
# self.forward(l=1) # read the next l characters and move the pointer.
        # Have we reached the end of the stream?
self.done = False
# The number of unclosed '{' and '['. `flow_level == 0` means block
# context.
self.flow_level = 0
# List of processed tokens that are not yet emitted.
self.tokens = []
# Add the STREAM-START token.
self.fetch_stream_start()
# Number of tokens that were emitted through the `get_token` method.
self.tokens_taken = 0
# The current indentation level.
self.indent = -1
# Past indentation levels.
self.indents = []
# Variables related to simple keys treatment.
# A simple key is a key that is not denoted by the '?' indicator.
# Example of simple keys:
# ---
# block simple key: value
# ? not a simple key:
# : { flow simple key: value }
# We emit the KEY token before all keys, so when we find a potential
# simple key, we try to locate the corresponding ':' indicator.
# Simple keys should be limited to a single line and 1024 characters.
# Can a simple key start at the current position? A simple key may
# start:
# - at the beginning of the line, not counting indentation spaces
# (in block context),
# - after '{', '[', ',' (in the flow context),
# - after '?', ':', '-' (in the block context).
# In the block context, this flag also signifies if a block collection
# may start at the current position.
self.allow_simple_key = True
# Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more than one possible simple key
# for each level. The value is a SimpleKey record:
# (token_number, required, index, line, column, mark)
# A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
# '[', or '{' tokens.
self.possible_simple_keys = {}
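        # Illustrative example (not from the YAML spec): while scanning the
        # block mapping line `foo: bar`, this dict briefly maps flow level 0 to
        # a SimpleKey recording where `foo` started, until the ':' indicator is
        # reached and fetch_value() turns it into a KEY token.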
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
        # Return the next token, but do not delete it from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
def need_more_tokens(self):
if self.done:
return False
if not self.tokens:
return True
# The current token may be a potential simple key, so we
# need to look further.
self.stale_possible_simple_keys()
if self.next_possible_simple_key() == self.tokens_taken:
return True
def fetch_more_tokens(self):
# Eat whitespaces and comments until we reach the next token.
self.scan_to_next_token()
# Remove obsolete possible simple keys.
self.stale_possible_simple_keys()
# Compare the current indentation and column. It may add some tokens
# and decrease the current indentation level.
self.unwind_indent(self.column)
# Peek the next character.
ch = self.peek()
# Is it the end of stream?
if ch == u'\0':
return self.fetch_stream_end()
# Is it a directive?
if ch == u'%' and self.check_directive():
return self.fetch_directive()
# Is it the document start?
if ch == u'-' and self.check_document_start():
return self.fetch_document_start()
# Is it the document end?
if ch == u'.' and self.check_document_end():
return self.fetch_document_end()
# TODO: support for BOM within a stream.
#if ch == u'\uFEFF':
# return self.fetch_bom() <-- issue BOMToken
# Note: the order of the following checks is NOT significant.
# Is it the flow sequence start indicator?
if ch == u'[':
return self.fetch_flow_sequence_start()
# Is it the flow mapping start indicator?
if ch == u'{':
return self.fetch_flow_mapping_start()
# Is it the flow sequence end indicator?
if ch == u']':
return self.fetch_flow_sequence_end()
# Is it the flow mapping end indicator?
if ch == u'}':
return self.fetch_flow_mapping_end()
# Is it the flow entry indicator?
if ch == u',':
return self.fetch_flow_entry()
# Is it the block entry indicator?
if ch == u'-' and self.check_block_entry():
return self.fetch_block_entry()
# Is it the key indicator?
if ch == u'?' and self.check_key():
return self.fetch_key()
# Is it the value indicator?
if ch == u':' and self.check_value():
return self.fetch_value()
# Is it an alias?
if ch == u'*':
return self.fetch_alias()
# Is it an anchor?
if ch == u'&':
return self.fetch_anchor()
# Is it a tag?
if ch == u'!':
return self.fetch_tag()
# Is it a literal scalar?
if ch == u'|' and not self.flow_level:
return self.fetch_literal()
# Is it a folded scalar?
if ch == u'>' and not self.flow_level:
return self.fetch_folded()
# Is it a single quoted scalar?
if ch == u'\'':
return self.fetch_single()
# Is it a double quoted scalar?
if ch == u'\"':
return self.fetch_double()
# It must be a plain scalar then.
if self.check_plain():
return self.fetch_plain()
# No? It's an error. Let's produce a nice error message.
raise ScannerError("while scanning for the next token", None,
"found character %r that cannot start any token"
% ch.encode('utf-8'), self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
def stale_possible_simple_keys(self):
# Remove entries that are no longer possible simple keys. According to
# the YAML specification, simple keys
# - should be limited to a single line,
# - should be no longer than 1024 characters.
# Disabling this procedure will allow simple keys of any length and
# height (may cause problems if indentation is broken though).
for level in self.possible_simple_keys.keys():
key = self.possible_simple_keys[level]
if key.line != self.line \
or self.index-key.index > 1024:
if key.required:
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not find expected ':'", self.get_mark())
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
# The next token may start a simple key. We check if it's possible
# and save its position. This function is called for
# ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
# Check if a simple key is required at the current position.
required = not self.flow_level and self.indent == self.column
# A simple key is required only if it is the first token in the current
# line. Therefore it is always allowed.
assert self.allow_simple_key or not required
        # The next token might be a simple key. Let's save its number and
# position.
if self.allow_simple_key:
self.remove_possible_simple_key()
token_number = self.tokens_taken+len(self.tokens)
key = SimpleKey(token_number, required,
self.index, self.line, self.column, self.get_mark())
self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
                raise ScannerError("while scanning a simple key", key.mark,
                        "could not find expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
def unwind_indent(self, column):
## In flow context, tokens should respect indentation.
## Actually the condition should be `self.indent >= column` according to
## the spec. But this condition will prohibit intuitively correct
## constructions such as
## key : {
## }
#if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #        "invalid indentation or unclosed '[' or '{'",
        #        self.get_mark())
        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive than the specification requires.
if self.flow_level:
return
# In block context, we may need to issue the BLOCK-END tokens.
while self.indent > column:
mark = self.get_mark()
self.indent = self.indents.pop()
self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
def fetch_stream_start(self):
# We always add STREAM-START as the first token and STREAM-END as the
# last token.
# Read the token.
mark = self.get_mark()
# Add STREAM-START.
self.tokens.append(StreamStartToken(mark, mark,
encoding=self.encoding))
def fetch_stream_end(self):
        # Set the current indentation to -1.
self.unwind_indent(-1)
# Reset everything (not really needed).
self.allow_simple_key = False
self.possible_simple_keys = {}
# Read the token.
mark = self.get_mark()
# Add STREAM-END.
self.tokens.append(StreamEndToken(mark, mark))
        # The stream is finished.
self.done = True
def fetch_directive(self):
        # Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Scan and add DIRECTIVE.
self.tokens.append(self.scan_directive())
def fetch_document_start(self):
self.fetch_document_indicator(DocumentStartToken)
def fetch_document_end(self):
self.fetch_document_indicator(DocumentEndToken)
def fetch_document_indicator(self, TokenClass):
        # Set the current indentation to -1.
self.unwind_indent(-1)
# Reset simple keys. Note that there could not be a block collection
# after '---'.
self.remove_possible_simple_key()
self.allow_simple_key = False
# Add DOCUMENT-START or DOCUMENT-END.
start_mark = self.get_mark()
self.forward(3)
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_start(self):
self.fetch_flow_collection_start(FlowSequenceStartToken)
def fetch_flow_mapping_start(self):
self.fetch_flow_collection_start(FlowMappingStartToken)
def fetch_flow_collection_start(self, TokenClass):
# '[' and '{' may start a simple key.
self.save_possible_simple_key()
# Increase the flow level.
self.flow_level += 1
# Simple keys are allowed after '[' and '{'.
self.allow_simple_key = True
# Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_sequence_end(self):
self.fetch_flow_collection_end(FlowSequenceEndToken)
def fetch_flow_mapping_end(self):
self.fetch_flow_collection_end(FlowMappingEndToken)
def fetch_flow_collection_end(self, TokenClass):
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Decrease the flow level.
self.flow_level -= 1
# No simple keys after ']' or '}'.
self.allow_simple_key = False
# Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(TokenClass(start_mark, end_mark))
def fetch_flow_entry(self):
# Simple keys are allowed after ','.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add FLOW-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(FlowEntryToken(start_mark, end_mark))
def fetch_block_entry(self):
# Block context needs additional checks.
if not self.flow_level:
# Are we allowed to start a new entry?
if not self.allow_simple_key:
raise ScannerError(None, None,
"sequence entries are not allowed here",
self.get_mark())
# We may need to add BLOCK-SEQUENCE-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockSequenceStartToken(mark, mark))
# It's an error for the block entry to occur in the flow context,
# but we let the parser detect this.
else:
pass
# Simple keys are allowed after '-'.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add BLOCK-ENTRY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(BlockEntryToken(start_mark, end_mark))
def fetch_key(self):
# Block context needs additional checks.
if not self.flow_level:
            # Are we allowed to start a key (not necessarily a simple one)?
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping keys are not allowed here",
self.get_mark())
# We may need to add BLOCK-MAPPING-START.
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after '?' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add KEY.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(KeyToken(start_mark, end_mark))
def fetch_value(self):
# Do we determine a simple key?
if self.flow_level in self.possible_simple_keys:
# Add KEY.
key = self.possible_simple_keys[self.flow_level]
del self.possible_simple_keys[self.flow_level]
self.tokens.insert(key.token_number-self.tokens_taken,
KeyToken(key.mark, key.mark))
# If this key starts a new block mapping, we need to add
# BLOCK-MAPPING-START.
if not self.flow_level:
if self.add_indent(key.column):
self.tokens.insert(key.token_number-self.tokens_taken,
BlockMappingStartToken(key.mark, key.mark))
# There cannot be two simple keys one after another.
self.allow_simple_key = False
# It must be a part of a complex key.
else:
# Block context needs additional checks.
            # (Do we really need them? They will be caught by the parser
# anyway.)
if not self.flow_level:
# We are allowed to start a complex value if and only if
# we can start a simple key.
if not self.allow_simple_key:
raise ScannerError(None, None,
"mapping values are not allowed here",
self.get_mark())
# If this value starts a new block mapping, we need to add
# BLOCK-MAPPING-START. It will be detected as an error later by
# the parser.
if not self.flow_level:
if self.add_indent(self.column):
mark = self.get_mark()
self.tokens.append(BlockMappingStartToken(mark, mark))
# Simple keys are allowed after ':' in the block context.
self.allow_simple_key = not self.flow_level
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Add VALUE.
start_mark = self.get_mark()
self.forward()
end_mark = self.get_mark()
self.tokens.append(ValueToken(start_mark, end_mark))
def fetch_alias(self):
# ALIAS could be a simple key.
self.save_possible_simple_key()
# No simple keys after ALIAS.
self.allow_simple_key = False
# Scan and add ALIAS.
self.tokens.append(self.scan_anchor(AliasToken))
def fetch_anchor(self):
# ANCHOR could start a simple key.
self.save_possible_simple_key()
# No simple keys after ANCHOR.
self.allow_simple_key = False
# Scan and add ANCHOR.
self.tokens.append(self.scan_anchor(AnchorToken))
def fetch_tag(self):
# TAG could start a simple key.
self.save_possible_simple_key()
# No simple keys after TAG.
self.allow_simple_key = False
# Scan and add TAG.
self.tokens.append(self.scan_tag())
def fetch_literal(self):
self.fetch_block_scalar(style='|')
def fetch_folded(self):
self.fetch_block_scalar(style='>')
def fetch_block_scalar(self, style):
# A simple key may follow a block scalar.
self.allow_simple_key = True
# Reset possible simple key on the current level.
self.remove_possible_simple_key()
# Scan and add SCALAR.
self.tokens.append(self.scan_block_scalar(style))
def fetch_single(self):
self.fetch_flow_scalar(style='\'')
def fetch_double(self):
self.fetch_flow_scalar(style='"')
def fetch_flow_scalar(self, style):
# A flow scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after flow scalars.
self.allow_simple_key = False
# Scan and add SCALAR.
self.tokens.append(self.scan_flow_scalar(style))
def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain())
# Checkers.
def check_directive(self):
# DIRECTIVE: ^ '%' ...
# The '%' indicator is already checked.
if self.column == 0:
return True
def check_document_start(self):
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.column == 0:
if self.prefix(3) == u'---' \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return True
def check_document_end(self):
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.column == 0:
if self.prefix(3) == u'...' \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return True
def check_block_entry(self):
# BLOCK-ENTRY: '-' (' '|'\n')
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_key(self):
# KEY(flow context): '?'
if self.flow_level:
return True
# KEY(block context): '?' (' '|'\n')
else:
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_value(self):
# VALUE(flow context): ':'
if self.flow_level:
return True
# VALUE(block context): ':' (' '|'\n')
else:
return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
def check_plain(self):
# A plain scalar may start with any non-space character except:
# '-', '?', ':', ',', '[', ']', '{', '}',
# '#', '&', '*', '!', '|', '>', '\'', '\"',
# '%', '@', '`'.
#
# It may also start with
# '-', '?', ':'
# if it is followed by a non-space character.
#
# Note that we limit the last rule to the block context (except the
# '-' character) because we want the flow context to be space
# independent.
ch = self.peek()
return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
and (ch == u'-' or (not self.flow_level and ch in u'?:')))
# Scanners.
def scan_to_next_token(self):
# We ignore spaces, line breaks and comments.
# If we find a line break in the block context, we set the flag
# `allow_simple_key` on.
# The byte order mark is stripped if it's the first character in the
# stream. We do not yet support BOM inside the stream as the
# specification requires. Any such mark will be considered as a part
# of the document.
#
# TODO: We need to make tab handling rules more sane. A good rule is
# Tabs cannot precede tokens
# BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
# KEY(block), VALUE(block), BLOCK-ENTRY
# So the checking code is
# if <TAB>:
# self.allow_simple_keys = False
# We also need to add the check for `allow_simple_keys == True` to
# `unwind_indent` before issuing BLOCK-END.
# Scanners for block, flow, and plain scalars need to be modified.
if self.index == 0 and self.peek() == u'\uFEFF':
self.forward()
found = False
while not found:
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
if self.scan_line_break():
if not self.flow_level:
self.allow_simple_key = True
else:
found = True
def scan_directive(self):
# See the specification for details.
start_mark = self.get_mark()
self.forward()
name = self.scan_directive_name(start_mark)
value = None
if name == u'YAML':
value = self.scan_yaml_directive_value(start_mark)
end_mark = self.get_mark()
elif name == u'TAG':
value = self.scan_tag_directive_value(start_mark)
end_mark = self.get_mark()
else:
end_mark = self.get_mark()
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
self.scan_directive_ignored_line(start_mark)
return DirectiveToken(name, value, start_mark, end_mark)
def scan_directive_name(self, start_mark):
# See the specification for details.
length = 0
ch = self.peek(length)
while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
if not length:
raise ScannerError("while scanning a directive", start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
return value
def scan_yaml_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
major = self.scan_yaml_directive_number(start_mark)
if self.peek() != '.':
raise ScannerError("while scanning a directive", start_mark,
"expected a digit or '.', but found %r"
% self.peek().encode('utf-8'),
self.get_mark())
self.forward()
minor = self.scan_yaml_directive_number(start_mark)
if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a digit or ' ', but found %r"
% self.peek().encode('utf-8'),
self.get_mark())
return (major, minor)
def scan_yaml_directive_number(self, start_mark):
# See the specification for details.
ch = self.peek()
if not (u'0' <= ch <= '9'):
raise ScannerError("while scanning a directive", start_mark,
"expected a digit, but found %r" % ch.encode('utf-8'),
self.get_mark())
length = 0
while u'0' <= self.peek(length) <= u'9':
length += 1
value = int(self.prefix(length))
self.forward(length)
return value
def scan_tag_directive_value(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
handle = self.scan_tag_directive_handle(start_mark)
while self.peek() == u' ':
self.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
def scan_tag_directive_handle(self, start_mark):
# See the specification for details.
value = self.scan_tag_handle('directive', start_mark)
ch = self.peek()
if ch != u' ':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
return value
def scan_tag_directive_prefix(self, start_mark):
# See the specification for details.
value = self.scan_tag_uri('directive', start_mark)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
return value
def scan_directive_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in u'\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a directive", start_mark,
"expected a comment or a line break, but found %r"
% ch.encode('utf-8'), self.get_mark())
self.scan_line_break()
def scan_anchor(self, TokenClass):
# The specification does not restrict characters for anchors and
# aliases. This may lead to problems, for instance, the document:
# [ *alias, value ]
        # can be interpreted in two ways, as
# [ "value" ]
# and
# [ *alias , "value" ]
# Therefore we restrict aliases to numbers and ASCII letters.
start_mark = self.get_mark()
indicator = self.peek()
if indicator == '*':
name = 'alias'
else:
name = 'anchor'
self.forward()
length = 0
ch = self.peek(length)
while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
if not length:
raise ScannerError("while scanning an %s" % name, start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
value = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
raise ScannerError("while scanning an %s" % name, start_mark,
"expected alphabetic or numeric character, but found %r"
% ch.encode('utf-8'), self.get_mark())
end_mark = self.get_mark()
return TokenClass(value, start_mark, end_mark)
def scan_tag(self):
# See the specification for details.
start_mark = self.get_mark()
ch = self.peek(1)
if ch == u'<':
handle = None
self.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
if self.peek() != u'>':
raise ScannerError("while parsing a tag", start_mark,
"expected '>', but found %r" % self.peek().encode('utf-8'),
self.get_mark())
self.forward()
elif ch in u'\0 \t\r\n\x85\u2028\u2029':
handle = None
suffix = u'!'
self.forward()
else:
length = 1
use_handle = False
while ch not in u'\0 \r\n\x85\u2028\u2029':
if ch == u'!':
use_handle = True
break
length += 1
ch = self.peek(length)
handle = u'!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
handle = u'!'
self.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a tag", start_mark,
"expected ' ', but found %r" % ch.encode('utf-8'),
self.get_mark())
value = (handle, suffix)
end_mark = self.get_mark()
return TagToken(value, start_mark, end_mark)
def scan_block_scalar(self, style):
# See the specification for details.
if style == '>':
folded = True
else:
folded = False
chunks = []
start_mark = self.get_mark()
# Scan the header.
self.forward()
chomping, increment = self.scan_block_scalar_indicators(start_mark)
self.scan_block_scalar_ignored_line(start_mark)
# Determine the indentation level and go to the first non-empty line.
min_indent = self.indent+1
if min_indent < 1:
min_indent = 1
if increment is None:
breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
indent = max(min_indent, max_indent)
else:
indent = min_indent+increment-1
breaks, end_mark = self.scan_block_scalar_breaks(indent)
line_break = u''
# Scan the inner part of the block scalar.
while self.column == indent and self.peek() != u'\0':
chunks.extend(breaks)
leading_non_space = self.peek() not in u' \t'
length = 0
while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
length += 1
chunks.append(self.prefix(length))
self.forward(length)
line_break = self.scan_line_break()
breaks, end_mark = self.scan_block_scalar_breaks(indent)
if self.column == indent and self.peek() != u'\0':
# Unfortunately, folding rules are ambiguous.
#
# This is the folding according to the specification:
if folded and line_break == u'\n' \
and leading_non_space and self.peek() not in u' \t':
if not breaks:
chunks.append(u' ')
else:
chunks.append(line_break)
# This is Clark Evans's interpretation (also in the spec
# examples):
#
#if folded and line_break == u'\n':
# if not breaks:
# if self.peek() not in ' \t':
# chunks.append(u' ')
# else:
# chunks.append(line_break)
#else:
# chunks.append(line_break)
else:
break
# Chomp the tail.
if chomping is not False:
chunks.append(line_break)
if chomping is True:
chunks.extend(breaks)
# We are done.
return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
style)
def scan_block_scalar_indicators(self, start_mark):
# See the specification for details.
chomping = None
increment = None
ch = self.peek()
if ch in u'+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch in u'0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
elif ch in u'0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError("while scanning a block scalar", start_mark,
"expected indentation indicator in the range 1-9, but found 0",
self.get_mark())
self.forward()
ch = self.peek()
if ch in u'+-':
if ch == '+':
chomping = True
else:
chomping = False
self.forward()
ch = self.peek()
if ch not in u'\0 \r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected chomping or indentation indicators, but found %r"
% ch.encode('utf-8'), self.get_mark())
return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in u'\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected a comment or a line break, but found %r"
% ch.encode('utf-8'), self.get_mark())
self.scan_line_break()
def scan_block_scalar_indentation(self):
# See the specification for details.
chunks = []
max_indent = 0
end_mark = self.get_mark()
while self.peek() in u' \r\n\x85\u2028\u2029':
if self.peek() != u' ':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
else:
self.forward()
if self.column > max_indent:
max_indent = self.column
return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
# See the specification for details.
chunks = []
end_mark = self.get_mark()
while self.column < indent and self.peek() == u' ':
self.forward()
while self.peek() in u'\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while self.column < indent and self.peek() == u' ':
self.forward()
return chunks, end_mark
def scan_flow_scalar(self, style):
# See the specification for details.
        # Note that we loosen indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere to indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
# that document separators are not included in scalars.
if style == '"':
double = True
else:
double = False
chunks = []
start_mark = self.get_mark()
quote = self.peek()
self.forward()
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
while self.peek() != quote:
chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.forward()
end_mark = self.get_mark()
return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
style)
ESCAPE_REPLACEMENTS = {
u'0': u'\0',
u'a': u'\x07',
u'b': u'\x08',
u't': u'\x09',
u'\t': u'\x09',
u'n': u'\x0A',
u'v': u'\x0B',
u'f': u'\x0C',
u'r': u'\x0D',
u'e': u'\x1B',
u' ': u'\x20',
u'\"': u'\"',
u'\\': u'\\',
u'N': u'\x85',
u'_': u'\xA0',
u'L': u'\u2028',
u'P': u'\u2029',
}
ESCAPE_CODES = {
u'x': 2,
u'u': 4,
u'U': 8,
}
def scan_flow_scalar_non_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
length = 0
while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
length += 1
if length:
chunks.append(self.prefix(length))
self.forward(length)
ch = self.peek()
if not double and ch == u'\'' and self.peek(1) == u'\'':
chunks.append(u'\'')
self.forward(2)
elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
chunks.append(ch)
self.forward()
elif double and ch == u'\\':
self.forward()
ch = self.peek()
if ch in self.ESCAPE_REPLACEMENTS:
chunks.append(self.ESCAPE_REPLACEMENTS[ch])
self.forward()
elif ch in self.ESCAPE_CODES:
length = self.ESCAPE_CODES[ch]
self.forward()
for k in range(length):
if self.peek(k) not in u'0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
code = int(self.prefix(length), 16)
chunks.append(unichr(code))
self.forward(length)
elif ch in u'\r\n\x85\u2028\u2029':
self.scan_line_break()
chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
else:
raise ScannerError("while scanning a double-quoted scalar", start_mark,
"found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
else:
return chunks
def scan_flow_scalar_spaces(self, double, start_mark):
# See the specification for details.
chunks = []
length = 0
while self.peek(length) in u' \t':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch == u'\0':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected end of stream", self.get_mark())
elif ch in u'\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
breaks = self.scan_flow_scalar_breaks(double, start_mark)
if line_break != u'\n':
chunks.append(line_break)
elif not breaks:
chunks.append(u' ')
chunks.extend(breaks)
else:
chunks.append(whitespaces)
return chunks
def scan_flow_scalar_breaks(self, double, start_mark):
# See the specification for details.
chunks = []
while True:
# Instead of checking indentation, we check for document
# separators.
prefix = self.prefix(3)
if (prefix == u'---' or prefix == u'...') \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a quoted scalar", start_mark,
"found unexpected document separator", self.get_mark())
while self.peek() in u' \t':
self.forward()
if self.peek() in u'\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
else:
return chunks
def scan_plain(self):
# See the specification for details.
# We add an additional restriction for the flow context:
# plain scalars in the flow context cannot contain ',', ':' and '?'.
# We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosened for the flow context.
chunks = []
start_mark = self.get_mark()
end_mark = start_mark
indent = self.indent+1
# We allow zero indentation for scalars, but then we need to check for
# document separators at the beginning of the line.
#if indent == 0:
# indent = 1
spaces = []
while True:
length = 0
if self.peek() == u'#':
break
while True:
ch = self.peek(length)
if ch in u'\0 \t\r\n\x85\u2028\u2029' \
or (not self.flow_level and ch == u':' and
self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
or (self.flow_level and ch in u',:?[]{}'):
break
length += 1
# It's not clear what we should do with ':' in the flow context.
if (self.flow_level and ch == u':'
and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
self.forward(length)
raise ScannerError("while scanning a plain scalar", start_mark,
"found unexpected ':'", self.get_mark(),
"Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
if length == 0:
break
self.allow_simple_key = False
chunks.extend(spaces)
chunks.append(self.prefix(length))
self.forward(length)
end_mark = self.get_mark()
spaces = self.scan_plain_spaces(indent, start_mark)
if not spaces or self.peek() == u'#' \
or (not self.flow_level and self.column < indent):
break
return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
def scan_plain_spaces(self, indent, start_mark):
# See the specification for details.
# The specification is really confusing about tabs in plain scalars.
# We just forbid them completely. Do not use tabs in YAML!
chunks = []
length = 0
while self.peek(length) in u' ':
length += 1
whitespaces = self.prefix(length)
self.forward(length)
ch = self.peek()
if ch in u'\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
self.allow_simple_key = True
prefix = self.prefix(3)
if (prefix == u'---' or prefix == u'...') \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return
breaks = []
while self.peek() in u' \r\n\x85\u2028\u2029':
if self.peek() == ' ':
self.forward()
else:
breaks.append(self.scan_line_break())
prefix = self.prefix(3)
if (prefix == u'---' or prefix == u'...') \
and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
return
if line_break != u'\n':
chunks.append(line_break)
elif not breaks:
chunks.append(u' ')
chunks.extend(breaks)
elif whitespaces:
chunks.append(whitespaces)
return chunks
def scan_tag_handle(self, name, start_mark):
# See the specification for details.
        # For some strange reason, the specification does not allow '_' in
# tag handles. I have allowed it anyway.
ch = self.peek()
if ch != u'!':
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch.encode('utf-8'),
self.get_mark())
length = 1
ch = self.peek(length)
if ch != u' ':
while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
if ch != u'!':
self.forward(length)
raise ScannerError("while scanning a %s" % name, start_mark,
"expected '!', but found %r" % ch.encode('utf-8'),
self.get_mark())
length += 1
value = self.prefix(length)
self.forward(length)
return value
def scan_tag_uri(self, name, start_mark):
# See the specification for details.
# Note: we do not check if URI is well-formed.
chunks = []
length = 0
ch = self.peek(length)
while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
if ch == u'%':
chunks.append(self.prefix(length))
self.forward(length)
length = 0
chunks.append(self.scan_uri_escapes(name, start_mark))
else:
length += 1
ch = self.peek(length)
if length:
chunks.append(self.prefix(length))
self.forward(length)
length = 0
if not chunks:
raise ScannerError("while parsing a %s" % name, start_mark,
"expected URI, but found %r" % ch.encode('utf-8'),
self.get_mark())
return u''.join(chunks)
def scan_uri_escapes(self, name, start_mark):
# See the specification for details.
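        # Illustrative example: the run '%E2%82%AC' is decoded to three raw
        # bytes and then interpreted as UTF-8, yielding u'\u20ac' (Euro sign).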
bytes = []
mark = self.get_mark()
while self.peek() == u'%':
self.forward()
for k in range(2):
if self.peek(k) not in u'0123456789ABCDEFabcdef':
                    raise ScannerError("while scanning a %s" % name, start_mark,
                            "expected URI escape sequence of 2 hexadecimal numbers, but found %r" %
                                (self.peek(k).encode('utf-8')), self.get_mark())
bytes.append(chr(int(self.prefix(2), 16)))
self.forward(2)
try:
value = unicode(''.join(bytes), 'utf-8')
except UnicodeDecodeError, exc:
raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
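        # Illustrative example: for input u'\r\nfoo' both break characters are
        # consumed and u'\n' is returned; a lone u'\u2028' is returned as-is.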
ch = self.peek()
if ch in u'\r\n\x85':
if self.prefix(2) == u'\r\n':
self.forward(2)
else:
self.forward()
return u'\n'
elif ch in u'\u2028\u2029':
self.forward()
return ch
return u''
#try:
# import psyco
# psyco.bind(Scanner)
#except ImportError:
# pass
| apache-2.0 |
kennethdecker/MagnePlane | src/hyperloop/Python/pod/cycle/flow_path.py | 4 | 11938 | """
A group that models an inlet->compressor->duct->nozzle->shaft
using pycycle v2 https://github.com/OpenMDAO/pycycle2.
Calculates flow properties at the front and back of each thermodynamic
element, compressor power required, some geometry, and drag/thrust.
"""
from __future__ import print_function
import numpy as np
from os import remove
from openmdao.core.group import Group, Component, IndepVarComp
from openmdao.solvers.newton import Newton
from openmdao.api import NLGaussSeidel
from openmdao.solvers.scipy_gmres import ScipyGMRES
from openmdao.units.units import convert_units as cu
from openmdao.api import Problem, LinearGaussSeidel
from pycycle.components import Compressor, Shaft, FlowStart, Inlet, Nozzle, Duct, Splitter, FlightConditions
from pycycle.species_data import janaf
from pycycle.connect_flow import connect_flow
from pycycle.constants import AIR_FUEL_MIX, AIR_MIX
from pycycle.constants import R_UNIVERSAL_ENG, R_UNIVERSAL_SI
from openmdao.solvers.ln_gauss_seidel import LinearGaussSeidel
from openmdao.solvers.ln_direct import DirectSolver
from openmdao.api import SqliteRecorder
C_IN2toM2 = 144. * (3.28084**2.)
HPtoKW = 0.7457
tubeLen = 563270.0 # // 350 miles in meters
teslaPack = 90.0 # // kw-hours
class FlowPath(Group):
"""
Params
------
fl_start.P : float
Tube total pressure
fl_start.T : float
Tube total temperature
fl_start.W : float
Tube total mass flow
fl_start.MN_target : float
Vehicle mach number
comp.map.PRdes : float
Pressure ratio of compressor
nozzle.Ps_exhaust : float
Exit pressure of nozzle
Returns
-------
comp.torque : float
Total torque required by motor
comp.power : float
Total power required by motor
comp.Fl_O:stat:area : float
Area of the duct
nozzle.Fg : float
Nozzle thrust
inlet.F_ram : float
Ram drag
nozzle.Fl_O:tot:T : float
Total temperature at nozzle exit
nozzle.Fl_O:stat:W : float
Total mass flow rate at nozzle exit
FlowPath.inlet.Fl_O:tot:h : float
Inlet enthalpy of compressor
FlowPath.comp.Fl_O:tot:h : float
Exit enthalpy of compressor
Notes
-----
[1] see https://github.com/jcchin/pycycle2/wiki
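    Example
    -------
    Illustrative usage sketch (mirrors the ``__main__`` block at the bottom of
    this file; the variable names are examples only, not part of the model)::
        prob = Problem()
        prob.root = Group()
        prob.root.add('FlowPath', FlowPath())
        prob.setup(check=True)
        prob.run()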
"""
def __init__(self):
super(FlowPath, self).__init__()
des_vars = (('ram_recovery', 0.99),
('effDes', 0.9),
('duct_MN', 0.65),
('duct_dPqP', 0.),
('nozzle_Cfg', 1.0),
('nozzle_dPqP', 0.),
('shaft_Nmech', 10000.),
('inlet_MN', 0.6),
('comp_MN', 0.65))
self.add('input_vars',IndepVarComp(des_vars))
self.add('fl_start', FlowStart(thermo_data=janaf, elements=AIR_MIX))
# internal flow
self.add('inlet', Inlet(thermo_data=janaf, elements=AIR_MIX))
self.add('comp', Compressor(thermo_data=janaf, elements=AIR_MIX))
self.add('duct', Duct(thermo_data=janaf, elements=AIR_MIX))
self.add('nozzle', Nozzle(thermo_data=janaf, elements=AIR_MIX))
self.add('shaft', Shaft(1))
# connect components
connect_flow(self, 'fl_start.Fl_O', 'inlet.Fl_I')
connect_flow(self, 'inlet.Fl_O', 'comp.Fl_I')
connect_flow(self, 'comp.Fl_O', 'duct.Fl_I')
connect_flow(self, 'duct.Fl_O', 'nozzle.Fl_I')
self.connect('input_vars.ram_recovery', 'inlet.ram_recovery')
self.connect('input_vars.effDes', 'comp.map.effDes')
self.connect('input_vars.duct_MN', 'duct.MN_target')
self.connect('input_vars.duct_dPqP', 'duct.dPqP')
self.connect('input_vars.nozzle_Cfg', 'nozzle.Cfg')
self.connect('input_vars.nozzle_dPqP', 'nozzle.dPqP')
self.connect('input_vars.shaft_Nmech', 'shaft.Nmech')
self.connect('input_vars.inlet_MN', 'inlet.MN_target')
self.connect('input_vars.comp_MN', 'comp.MN_target')
self.connect('comp.trq', 'shaft.trq_0')
self.connect('shaft.Nmech', 'comp.Nmech')
if __name__ == "__main__":
prob = Problem()
root = prob.root = Group()
root.add('FlowPath', FlowPath())
recorder = SqliteRecorder('FlowPathdb')
recorder.options['record_params'] = True
recorder.options['record_metadata'] = True
prob.driver.add_recorder(recorder)
params = (('P', .1879, {'units': 'psi'}),
('T', 605.06, {'units': 'degR'}),
('W', 7.2673, {'units': 'kg/s'}),
('vehicleMach', 0.8),
('PRdes', 12.5),
('PsE', 0.05588, {'units': 'psi'}))
prob.root.add('des_vars', IndepVarComp(params))
prob.root.connect('des_vars.P', 'FlowPath.fl_start.P')
prob.root.connect('des_vars.T', 'FlowPath.fl_start.T')
prob.root.connect('des_vars.W', 'FlowPath.fl_start.W')
prob.root.connect('des_vars.vehicleMach', 'FlowPath.fl_start.MN_target')
prob.root.connect('des_vars.PRdes', 'FlowPath.comp.map.PRdes')
prob.root.connect('des_vars.PsE', 'FlowPath.nozzle.Ps_exhaust')
# Make sure balance runs before FlowPath
#prob.root.set_order(['des_vars', 'balance', 'FlowPath'])
prob.setup(check=True)
prob.root.list_connections()
#prob.print_all_convergence()
import time
t = time.time()
prob.run()
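    # Battery estimate (illustrative reading of the expression below): compressor
    # power (hp -> kW) multiplied by the tube transit time in hours (tube length
    # divided by pod speed, ft/s -> m/s), then divided by one 90 kW-h Tesla pack.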
batteries = (-prob['FlowPath.comp.power'] * HPtoKW * (tubeLen / (
prob['FlowPath.fl_start.Fl_O:stat:V'] * 0.3048) / 3600.0)) / teslaPack
astar = np.sqrt(prob['FlowPath.fl_start.Fl_O:stat:gamma'] * R_UNIVERSAL_SI *
(cu(prob['FlowPath.fl_start.Fl_O:stat:T'], 'degR', 'degK')))
ustar = astar * prob['FlowPath.fl_start.Fl_O:stat:MN']
dstar = prob['FlowPath.fl_start.Fl_O:stat:gamma'] * cu(
prob['FlowPath.fl_start.Fl_O:stat:P'], 'psi', 'Pa') / astar**2
mustar = 0.00001716 * (cu(
prob['FlowPath.fl_start.Fl_O:stat:T'], 'degR', 'degK') / 273.15)**1.5 * (
273.15 + 110.4) / (cu(prob['FlowPath.fl_start.Fl_O:stat:T'], 'degR',
'degK') + 110.4) # --- Sutherlands Law
#Re = dstar*ustar/mustar*Lstar_Lref
#print (mustar)
print("")
print("--- Output ----------------------")
print("--- Freestream Static Conditions ---")
print("Pod Mach No.: %.6f " % (prob['FlowPath.fl_start.Fl_O:stat:MN']))
print("Ambient Ps: %.6f psi" % (prob['FlowPath.fl_start.Fl_O:stat:P']))
print("Ambient Ts: %.6f R" % (prob['FlowPath.fl_start.Fl_O:stat:T']))
print("Ambient Pt: %.6f psi" % (prob['FlowPath.fl_start.Fl_O:tot:P']))
print("Ambient Tt: %.6f R" % (prob['FlowPath.fl_start.Fl_O:tot:T']))
print("Ambient Ds: %.6f kg/m^3" %
(cu(prob['FlowPath.fl_start.Fl_O:stat:rho'], 'lbm/ft**3', 'kg/m**3')))
print("Ambient Viscosity %.8f kg/(m-s)" % (mustar)) #*1.48816394
print("Pod Velocity: %.6f m/s" %
(cu(prob['FlowPath.fl_start.Fl_O:stat:V'], 'ft/s', 'm/s')))
print("Reynolds No.= %.6f -/grid unit" % (
(cu(prob['FlowPath.fl_start.Fl_O:stat:rho'], 'lbm/ft**3', 'kg/m**3') * cu(
prob['FlowPath.fl_start.Fl_O:stat:V'], 'ft/s', 'm/s')) / (mustar)))
print("")
print("--- Fan Face Conditions ---")
print("Compressor Mach No.: %.6f " % (prob['FlowPath.inlet.Fl_O:stat:MN']))
print("Compressor Area: %.6f m^2" %
(cu(prob['FlowPath.inlet.Fl_O:stat:area'], 'inch**2', 'm**2')))
print("Compressor Radius: %.6f m" % (np.sqrt(
(cu(prob['FlowPath.inlet.Fl_O:stat:area'], 'inch**2', 'm**2')) / np.pi)))
print("Compressor Ps: %.6f psi" %
(prob['FlowPath.inlet.Fl_O:stat:P']))
print("Compressor Ts: %.6f degR" %
(prob['FlowPath.inlet.Fl_O:stat:T']))
print("Compressor Pt: %.6f psi" % (prob['FlowPath.inlet.Fl_O:tot:P']))
print("Compressor Tt: %.6f degR" %
(prob['FlowPath.inlet.Fl_O:tot:T']))
print("Compressor MFR: %.6f kg/s" %
(cu(prob['FlowPath.inlet.Fl_O:stat:W'], 'lbm/s', 'kg/s')))
print("Compressor SPR: %.6f " % (
prob['FlowPath.inlet.Fl_O:stat:P'] / prob['FlowPath.fl_start.Fl_O:stat:P']))
print("Compressor Power Reqd: %.6f hp" % (prob['FlowPath.comp.power']))
print ("Compressor inlet ht: %.6f Btu/lbm" % (prob['FlowPath.inlet.Fl_O:tot:h']))
print ("Compressor exit ht: %.6f Btu/lbm" % (prob['FlowPath.comp.Fl_O:tot:h']))
print("")
print ("--- Compressor Exit Conditions ---")
print ("Compressor Mach No.: %.6f " % (prob['FlowPath.comp.Fl_O:stat:MN']))
print ("Compressor Area: %.6f in^2" % (prob['FlowPath.comp.Fl_O:stat:area']))
print ("Compressor Radius: %.6f m" % (np.sqrt((cu(prob['FlowPath.comp.Fl_O:stat:area'], 'inch**2', 'm**2'))/np.pi)))
print ("Compressor Ps: %.6f psi" % (prob['FlowPath.comp.Fl_O:stat:P']))
print ("Compressor Ts: %.6f degR" % (prob['FlowPath.comp.Fl_O:stat:T']))
print ("Compressor Pt: %.6f psi" % (prob['FlowPath.comp.Fl_O:tot:P']))
print ("Compressor Tt: %.6f degR" % (prob['FlowPath.comp.Fl_O:tot:T']))
print ("")
print("--- Nozzle Plenum Conditions ---")
print("Nozzle Plenum Area: %.6f m^2" %
(cu(prob['FlowPath.duct.Fl_O:stat:area'], 'inch**2', 'm**2')))
print("Nozzle Plenum Radius: %.6f m " % (np.sqrt(
(cu(prob['FlowPath.duct.Fl_O:stat:area'], 'inch**2', 'm**2')) / np.pi)))
print("Nozzle Plenum Ps: %.6f psi " % (prob['FlowPath.duct.Fl_O:stat:P']))
print("Nozzle Plenum Ts: %.6f degR " %
(prob['FlowPath.duct.Fl_O:stat:T']))
print("Nozzle Plenum Pt: %.6f psi " % (prob['FlowPath.duct.Fl_O:tot:P']))
print("Nozzle Plenum Tt: %.6f degR " % (prob['FlowPath.duct.Fl_O:tot:T']))
print("Nozzle Plenum TPR %.6f" %
(prob['FlowPath.duct.Fl_O:tot:P'] / prob['FlowPath.fl_start.Fl_O:stat:P']))
print("Nozzle Plenum TTR %.6f" %
(prob['FlowPath.duct.Fl_O:tot:T'] / prob['FlowPath.fl_start.Fl_O:stat:T']))
print("")
print("--- Nozzle Exit Conditions ---")
print("Mach No.: %.6f " % (prob['FlowPath.nozzle.Fl_O:stat:MN']))
print("Nozzle Exit Area: %.6f m^2" %
(cu(prob['FlowPath.nozzle.Fl_O:stat:area'], 'inch**2', 'm**2')))
print("Nozzle Exit Radius: %.6f m " % (np.sqrt(
(cu(prob['FlowPath.nozzle.Fl_O:stat:area'], 'inch**2', 'm**2')) / np.pi)))
print("Nozzle Exit Ps: %.6f psi" % (prob['FlowPath.nozzle.Fl_O:stat:P']))
print("Nozzle Exit Ts: %.6f degR" %
(prob['FlowPath.nozzle.Fl_O:stat:T']))
print("Nozzle Exit Pt: %.6f psi" % (prob['FlowPath.nozzle.Fl_O:tot:P']))
print("Nozzle Exit Tt: %.6f degR" % (prob['FlowPath.nozzle.Fl_O:tot:T']))
print("Nozzle Exit MFR: %.6f kg/s" %
(cu(prob['FlowPath.nozzle.Fl_O:stat:W'], 'lbm/s', 'kg/s')))
print("Nozzle Thrust: %.6f lb" % prob['FlowPath.nozzle.Fg'])
print("Inlet Ram Drag: %.6f lb" % prob['FlowPath.inlet.F_ram'])
print("Pod Gross Thrust: %.6f lb" %
(prob['FlowPath.nozzle.Fg'] - prob['FlowPath.inlet.F_ram']))
print("")
print("--- Force/Power Balances ---")
print("comp pwr out: ", prob['FlowPath.comp.power'])
print("comp trq out: ", prob['FlowPath.comp.trq'])
print("net trq: ", prob['FlowPath.shaft.trq_net'])
print
# print 'resid', prob['FlowPath.pwr_balance.pwr_net']
print("comp.Fl_O:tot:P", prob['FlowPath.comp.Fl_O:tot:P'])
print("comp.Fl_O:tot:T", prob['FlowPath.comp.Fl_O:tot:T'])
print("comp.Fl_O:tot:h", prob['FlowPath.comp.Fl_O:tot:h'])
import sqlitedict
from pprint import pprint
db = sqlitedict.SqliteDict('FlowPathdb', 'openmdao')
data = db['rank0:Driver/1']
u = data['Unknowns']
#pprint(u) # print all outputs
prob.cleanup()
remove('./FlowPathdb')
quit()
| apache-2.0 |
lnielsen/zenodo | tests/unit/deposit/test_sips.py | 2 | 3891 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Test Zenodo deposit workflow."""
from __future__ import absolute_import, print_function
import json
from flask_security import login_user
from invenio_sipstore.models import SIP, RecordSIP, SIPFile
from six import BytesIO
def test_basic_workflow(app, db, users, deposit):
"""Test simple deposit publishing workflow."""
with app.test_request_context(environ_base={'REMOTE_ADDR': '127.0.0.1'}):
datastore = app.extensions['security'].datastore
login_user(datastore.get_user(users[0]['email']))
deposit.files['one.txt'] = BytesIO(b'Test')
deposit.files['two.txt'] = BytesIO(b'Test2')
deposit = deposit.publish()
# Should create one SIP, one RecordSIP and two SIPFiles
assert SIP.query.count() == 1
assert RecordSIP.query.count() == 1
assert SIPFile.query.count() == 2
sip = SIP.query.one()
assert sip.user_id == users[0]['id']
assert sip.agent['email'] == users[0]['email']
assert sip.agent['ip_address'] == '127.0.0.1'
assert len(sip.sip_files) == 2
assert sip.sip_files[0].sip_id == sip.id
assert sip.sip_files[1].sip_id == sip.id
        # Publishing a second time should create a new SIP and a new RecordSIP
        # but no new SIPFiles. This is under the assumption that users cannot
# upload new files to the already published deposit.
deposit = deposit.edit()
deposit['title'] = 'New Title'
deposit = deposit.publish()
assert SIP.query.count() == 2
assert RecordSIP.query.count() == 2
assert SIPFile.query.count() == 2
        # Fetch the last RecordSIP and make sure that
# the corresponding SIP doesn't have any files
recsip = RecordSIP.query.order_by(RecordSIP.created.desc()).first()
assert not recsip.sip.sip_files
def test_programmatic_publish(app, db, deposit, deposit_file):
    """Test publishing without a request.
Might never happen, but at least shouldn't crash the system."""
deposit = deposit.publish()
pid, record = deposit.fetch_published()
sip = SIP.query.one()
assert not sip.user_id
assert sip.content == json.dumps(record.dumps())
assert sip.sip_format == 'json'
assert len(sip.record_sips) == 1
assert sip.record_sips[0].pid_id == pid.id
assert len(sip.agent) == 1 # Just the '$schema' key in agent info
assert sip.agent['$schema'] == \
'http://zenodo.org/schemas/sipstore/agent-webclient-v1.0.0.json'
def test_anonymous_request(app, db, deposit):
"""Test sip creation during an anonymous request."""
with app.test_request_context(environ_base={'REMOTE_ADDR': '127.0.0.1'}):
deposit.files['one.txt'] = BytesIO(b'Test')
deposit.files['two.txt'] = BytesIO(b'Test2')
deposit.publish()
sip = SIP.query.one()
assert sip.user_id is None
assert 'email' not in sip.agent
assert sip.agent['ip_address'] == '127.0.0.1'
assert len(sip.sip_files) == 2
assert sip.sip_files[0].sip_id == sip.id
assert sip.sip_files[1].sip_id == sip.id
| gpl-2.0 |
DirtyUnicorns/android_kernel_oppo_msm8939 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
	Wait some time but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
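# parseLine() below expects raw function-tracer lines such as (illustrative
# sample): '<idle>-0  [001]  1024.123456: do_IRQ <-ret_from_intr'
# Only the timestamp, the callee and the caller are extracted.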
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
cfei18/incubator-airflow | tests/operators/bash_operator.py | 2 | 2554 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from datetime import datetime, timedelta
from airflow import DAG
from airflow.models import State
from airflow.operators.bash_operator import BashOperator
from airflow.utils import timezone
DEFAULT_DATE = datetime(2016, 1, 1, tzinfo=timezone.utc)
END_DATE = datetime(2016, 1, 2, tzinfo=timezone.utc)
INTERVAL = timedelta(hours=12)
class BashOperatorTestCase(unittest.TestCase):
def test_echo_env_variables(self):
"""
Test that env variables are exported correctly to the
task bash environment.
"""
now = datetime.utcnow()
now = now.replace(tzinfo=timezone.utc)
self.dag = DAG(
dag_id='bash_op_test', default_args={
'owner': 'airflow',
'retries': 100,
'start_date': DEFAULT_DATE
},
schedule_interval='@daily',
dagrun_timeout=timedelta(minutes=60))
self.dag.create_dagrun(
run_id='manual__' + DEFAULT_DATE.isoformat(),
execution_date=DEFAULT_DATE,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
import tempfile
with tempfile.NamedTemporaryFile() as f:
fname = f.name
t = BashOperator(
task_id='echo_env_vars',
dag=self.dag,
bash_command='echo $AIRFLOW_HOME>> {0};'
'echo $PYTHONPATH>> {0};'.format(fname)
)
os.environ['AIRFLOW_HOME'] = 'MY_PATH_TO_AIRFLOW_HOME'
t.run(DEFAULT_DATE, DEFAULT_DATE,
ignore_first_depends_on_past=True, ignore_ti_state=True)
with open(fname, 'r') as fr:
output = ''.join(fr.readlines())
self.assertIn('MY_PATH_TO_AIRFLOW_HOME', output)
# exported in run_unit_tests.sh as part of PYTHONPATH
self.assertIn('tests/test_utils', output)
| apache-2.0 |
jvrsantacruz/XlsxWriter | xlsxwriter/test/worksheet/test_cond_format10.py | 8 | 2976 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
import unittest
from datetime import datetime
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write('A1', 10)
worksheet.write('A2', 20)
worksheet.write('A3', 30)
worksheet.write('A4', 40)
date = datetime.strptime('2011-01-01', "%Y-%m-%d")
worksheet.conditional_format('A1:A4',
{'type': 'date',
'criteria': 'greater than',
'value': date,
'format': None,
})
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="cellIs" priority="1" operator="greaterThan">
<formula>40544</formula>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause |
igemsoftware/SYSU-Software2013 | project/Python27/Lib/test/test_types.py | 112 | 29723 | # Python test set -- part 6, built-in types
from test.test_support import run_unittest, have_unicode, run_with_locale, \
check_py3k_warnings
import unittest
import sys
import locale
class TypesTests(unittest.TestCase):
def test_truth_values(self):
if None: self.fail('None is true instead of false')
if 0: self.fail('0 is true instead of false')
if 0L: self.fail('0L is true instead of false')
if 0.0: self.fail('0.0 is true instead of false')
if '': self.fail('\'\' is true instead of false')
if not 1: self.fail('1 is false instead of true')
if not 1L: self.fail('1L is false instead of true')
if not 1.0: self.fail('1.0 is false instead of true')
if not 'x': self.fail('\'x\' is false instead of true')
if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true')
def f(): pass
class C: pass
x = C()
if not f: self.fail('f is false instead of true')
if not C: self.fail('C is false instead of true')
if not sys: self.fail('sys is false instead of true')
if not x: self.fail('x is false instead of true')
def test_boolean_ops(self):
if 0 or 0: self.fail('0 or 0 is true instead of false')
if 1 and 1: pass
else: self.fail('1 and 1 is false instead of true')
if not 1: self.fail('not 1 is true instead of false')
def test_comparisons(self):
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: self.fail('int comparisons failed')
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: self.fail('long int comparisons failed')
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: self.fail('float comparisons failed')
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: self.fail('string comparisons failed')
if None is None: pass
else: self.fail('identity test failed')
def test_float_constructor(self):
self.assertRaises(ValueError, float, '')
self.assertRaises(ValueError, float, '5\0')
def test_zero_division(self):
try: 5.0 / 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError")
try: 5.0 // 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError")
try: 5.0 % 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError")
try: 5 / 0L
except ZeroDivisionError: pass
else: self.fail("5 / 0L didn't raise ZeroDivisionError")
try: 5 // 0L
except ZeroDivisionError: pass
else: self.fail("5 // 0L didn't raise ZeroDivisionError")
try: 5 % 0L
except ZeroDivisionError: pass
else: self.fail("5 % 0L didn't raise ZeroDivisionError")
def test_numeric_types(self):
if 0 != 0L or 0 != 0.0 or 0L != 0.0: self.fail('mixed comparisons')
if 1 != 1L or 1 != 1.0 or 1L != 1.0: self.fail('mixed comparisons')
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
self.fail('int/long/float value not equal')
# calling built-in types without argument must return 0
if int() != 0: self.fail('int() does not return 0')
if long() != 0L: self.fail('long() does not return 0L')
if float() != 0.0: self.fail('float() does not return 0.0')
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: self.fail('int() does not round properly')
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: self.fail('long() does not round properly')
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: self.fail('float() does not work properly')
def test_float_to_string(self):
def test(f, result):
self.assertEqual(f.__format__('e'), result)
self.assertEqual('%e' % f, result)
# test all 2 digit exponents, both with __format__ and with
# '%' formatting
for i in range(-99, 100):
test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i))
# test some 3 digit exponents
self.assertEqual(1.5e100.__format__('e'), '1.500000e+100')
self.assertEqual('%e' % 1.5e100, '1.500000e+100')
self.assertEqual(1.5e101.__format__('e'), '1.500000e+101')
self.assertEqual('%e' % 1.5e101, '1.500000e+101')
self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100')
self.assertEqual('%e' % 1.5e-100, '1.500000e-100')
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101')
self.assertEqual('%e' % 1.5e-101, '1.500000e-101')
self.assertEqual('%g' % 1.0, '1')
self.assertEqual('%#g' % 1.0, '1.00000')
def test_normal_integers(self):
# Ensure the first 256 integers are shared
a = 256
b = 128*2
if a is not b: self.fail('256 is not shared')
if 12 + 24 != 36: self.fail('int op')
if 12 + (-24) != -12: self.fail('int op')
if (-12) + 24 != 12: self.fail('int op')
if (-12) + (-24) != -36: self.fail('int op')
if not 12 < 24: self.fail('int op')
if not -24 < -12: self.fail('int op')
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
self.fail('int mul commutativity')
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
self.fail("%r * %r == %r != %r" % (divisor, j, prod, m))
if type(prod) is not int:
self.fail("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
self.fail("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
self.fail("expected type(%r) to be long, not %r" %
(prod, type(prod)))
def test_long_integers(self):
if 12L + 24L != 36L: self.fail('long op')
if 12L + (-24L) != -12L: self.fail('long op')
if (-12L) + 24L != 12L: self.fail('long op')
if (-12L) + (-24L) != -36L: self.fail('long op')
if not 12L < 24L: self.fail('long op')
if not -24L < -12L: self.fail('long op')
x = sys.maxint
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)+1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
x = -x
if int(long(x)) != x: self.fail('long op')
x = x-1
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)-1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
try: 5 << -5
except ValueError: pass
else: self.fail('int negative shift <<')
try: 5L << -5L
except ValueError: pass
else: self.fail('long negative shift <<')
try: 5 >> -5
except ValueError: pass
else: self.fail('int negative shift >>')
try: 5L >> -5L
except ValueError: pass
else: self.fail('long negative shift >>')
def test_floats(self):
if 12.0 + 24.0 != 36.0: self.fail('float op')
if 12.0 + (-24.0) != -12.0: self.fail('float op')
if (-12.0) + 24.0 != 12.0: self.fail('float op')
if (-12.0) + (-24.0) != -36.0: self.fail('float op')
if not 12.0 < 24.0: self.fail('float op')
if not -24.0 < -12.0: self.fail('float op')
def test_strings(self):
if len('') != 0: self.fail('len(\'\')')
if len('a') != 1: self.fail('len(\'a\')')
if len('abcdef') != 6: self.fail('len(\'abcdef\')')
if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation')
if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3')
if 0*'abcde' != '': self.fail('string repetition 0*')
if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string')
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: self.fail('in/not in string')
x = 'x'*103
if '%s!'%x != x+'!': self.fail('nasty string formatting bug')
#extended slices for strings
a = '0123456789'
self.assertEqual(a[::], a)
self.assertEqual(a[::2], '02468')
self.assertEqual(a[1::2], '13579')
self.assertEqual(a[::-1],'9876543210')
self.assertEqual(a[::-2], '97531')
self.assertEqual(a[3::-2], '31')
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], '02468')
if have_unicode:
a = unicode('0123456789', 'ascii')
self.assertEqual(a[::], a)
self.assertEqual(a[::2], unicode('02468', 'ascii'))
self.assertEqual(a[1::2], unicode('13579', 'ascii'))
self.assertEqual(a[::-1], unicode('9876543210', 'ascii'))
self.assertEqual(a[::-2], unicode('97531', 'ascii'))
self.assertEqual(a[3::-2], unicode('31', 'ascii'))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], unicode('02468', 'ascii'))
def test_type_function(self):
self.assertRaises(TypeError, type, 1, 2)
self.assertRaises(TypeError, type, 1, 2, 3, 4)
def test_buffers(self):
self.assertRaises(ValueError, buffer, 'asdf', -1)
cmp(buffer("abc"), buffer("def")) # used to raise a warning: tp_compare didn't return -1, 0, or 1
self.assertRaises(TypeError, buffer, None)
a = buffer('asdf')
hash(a)
b = a * 5
if a == b:
self.fail('buffers should not be equal')
if str(b) != ('asdf' * 5):
self.fail('repeated buffer has wrong content')
if str(a * 0) != '':
self.fail('repeated buffer zero times has wrong content')
if str(a + buffer('def')) != 'asdfdef':
self.fail('concatenation of buffers yields wrong content')
if str(buffer(a)) != 'asdf':
self.fail('composing buffers failed')
if str(buffer(a, 2)) != 'df':
self.fail('specifying buffer offset failed')
if str(buffer(a, 0, 2)) != 'as':
self.fail('specifying buffer size failed')
if str(buffer(a, 1, 2)) != 'sd':
self.fail('specifying buffer offset and size failed')
self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
self.fail('composing length-specified buffer failed')
try: a[1] = 'g'
except TypeError: pass
else: self.fail("buffer assignment should raise TypeError")
try: a[0:1] = 'g'
except TypeError: pass
else: self.fail("buffer slice assignment should raise TypeError")
# array.array() returns an object that does not implement a char buffer,
# something which int() uses for conversion.
import array
try: int(buffer(array.array('c')))
except TypeError: pass
else: self.fail("char buffer (at C level) not working")
def test_int__format__(self):
def test(i, format_spec, result):
# just make sure I'm not accidentally checking longs
assert type(i) == int
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
self.assertEqual(i.__format__(unicode(format_spec)), result)
test(123456789, 'd', '123456789')
test(123456789, 'd', '123456789')
test(1, 'c', '\01')
# sign and aligning are interdependent
test(1, "-", '1')
test(-1, "-", '-1')
test(1, "-3", ' 1')
test(-1, "-3", ' -1')
test(1, "+3", ' +1')
test(-1, "+3", ' -1')
test(1, " 3", ' 1')
test(-1, " 3", ' -1')
test(1, " ", ' 1')
test(-1, " ", '-1')
# hex
test(3, "x", "3")
test(3, "X", "3")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(1234, "8x", " 4d2")
test(-1234, "8x", " -4d2")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(-3, "x", "-3")
test(-3, "X", "-3")
test(int('be', 16), "x", "be")
test(int('be', 16), "X", "BE")
test(-int('be', 16), "x", "-be")
test(-int('be', 16), "X", "-BE")
# octal
test(3, "o", "3")
test(-3, "o", "-3")
test(65, "o", "101")
test(-65, "o", "-101")
test(1234, "o", "2322")
test(-1234, "o", "-2322")
test(1234, "-o", "2322")
test(-1234, "-o", "-2322")
test(1234, " o", " 2322")
test(-1234, " o", "-2322")
test(1234, "+o", "+2322")
test(-1234, "+o", "-2322")
# binary
test(3, "b", "11")
test(-3, "b", "-11")
test(1234, "b", "10011010010")
test(-1234, "b", "-10011010010")
test(1234, "-b", "10011010010")
test(-1234, "-b", "-10011010010")
test(1234, " b", " 10011010010")
test(-1234, " b", "-10011010010")
test(1234, "+b", "+10011010010")
test(-1234, "+b", "-10011010010")
# alternate (#) formatting
test(0, "#b", '0b0')
test(0, "-#b", '0b0')
test(1, "-#b", '0b1')
test(-1, "-#b", '-0b1')
test(-1, "-#5b", ' -0b1')
test(1, "+#5b", ' +0b1')
test(100, "+#b", '+0b1100100')
test(100, "#012b", '0b0001100100')
test(-100, "#012b", '-0b001100100')
test(0, "#o", '0o0')
test(0, "-#o", '0o0')
test(1, "-#o", '0o1')
test(-1, "-#o", '-0o1')
test(-1, "-#5o", ' -0o1')
test(1, "+#5o", ' +0o1')
test(100, "+#o", '+0o144')
test(100, "#012o", '0o0000000144')
test(-100, "#012o", '-0o000000144')
test(0, "#x", '0x0')
test(0, "-#x", '0x0')
test(1, "-#x", '0x1')
test(-1, "-#x", '-0x1')
test(-1, "-#5x", ' -0x1')
test(1, "+#5x", ' +0x1')
test(100, "+#x", '+0x64')
test(100, "#012x", '0x0000000064')
test(-100, "#012x", '-0x000000064')
test(123456, "#012x", '0x000001e240')
test(-123456, "#012x", '-0x00001e240')
test(0, "#X", '0X0')
test(0, "-#X", '0X0')
test(1, "-#X", '0X1')
test(-1, "-#X", '-0X1')
test(-1, "-#5X", ' -0X1')
test(1, "+#5X", ' +0X1')
test(100, "+#X", '+0X64')
test(100, "#012X", '0X0000000064')
test(-100, "#012X", '-0X000000064')
test(123456, "#012X", '0X000001E240')
test(-123456, "#012X", '-0X00001E240')
# issue 5782, commas with no specifier type
test(1234, '010,', '00,001,234')
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3 .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3 .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3 .__format__, None)
self.assertRaises(TypeError, 3 .__format__, 0)
# can't have ',' with 'c'
self.assertRaises(ValueError, 3 .__format__, ",c")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0 .__format__, format_spec)
self.assertRaises(ValueError, 1 .__format__, format_spec)
self.assertRaises(ValueError, (-1) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456, "0<20", '12345600000000000000')
test(123456, "1<20", '12345611111111111111')
test(123456, "*<20", '123456**************')
test(123456, "0>20", '00000000000000123456')
test(123456, "1>20", '11111111111111123456')
test(123456, "*>20", '**************123456')
test(123456, "0=20", '00000000000000123456')
test(123456, "1=20", '11111111111111123456')
test(123456, "*=20", '**************123456')
def test_long__format__(self):
def test(i, format_spec, result):
# make sure we're not accidentally checking ints
assert type(i) == long
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
self.assertEqual(i.__format__(unicode(format_spec)), result)
test(10**100, 'd', '1' + '0' * 100)
test(10**100+100, 'd', '1' + '0' * 97 + '100')
test(123456789L, 'd', '123456789')
test(123456789L, 'd', '123456789')
# sign and aligning are interdependent
test(1L, "-", '1')
test(-1L, "-", '-1')
test(1L, "-3", ' 1')
test(-1L, "-3", ' -1')
test(1L, "+3", ' +1')
test(-1L, "+3", ' -1')
test(1L, " 3", ' 1')
test(-1L, " 3", ' -1')
test(1L, " ", ' 1')
test(-1L, " ", '-1')
test(1L, 'c', '\01')
# hex
test(3L, "x", "3")
test(3L, "X", "3")
test(1234L, "x", "4d2")
test(-1234L, "x", "-4d2")
test(1234L, "8x", " 4d2")
test(-1234L, "8x", " -4d2")
test(1234L, "x", "4d2")
test(-1234L, "x", "-4d2")
test(-3L, "x", "-3")
test(-3L, "X", "-3")
test(long('be', 16), "x", "be")
test(long('be', 16), "X", "BE")
test(-long('be', 16), "x", "-be")
test(-long('be', 16), "X", "-BE")
# octal
test(3L, "o", "3")
test(-3L, "o", "-3")
test(65L, "o", "101")
test(-65L, "o", "-101")
test(1234L, "o", "2322")
test(-1234L, "o", "-2322")
test(1234L, "-o", "2322")
test(-1234L, "-o", "-2322")
test(1234L, " o", " 2322")
test(-1234L, " o", "-2322")
test(1234L, "+o", "+2322")
test(-1234L, "+o", "-2322")
# binary
test(3L, "b", "11")
test(-3L, "b", "-11")
test(1234L, "b", "10011010010")
test(-1234L, "b", "-10011010010")
test(1234L, "-b", "10011010010")
test(-1234L, "-b", "-10011010010")
test(1234L, " b", " 10011010010")
test(-1234L, " b", "-10011010010")
test(1234L, "+b", "+10011010010")
test(-1234L, "+b", "-10011010010")
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3L .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3L .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3L .__format__, None)
self.assertRaises(TypeError, 3L .__format__, 0)
# alternate specifier in wrong place
self.assertRaises(ValueError, 1L .__format__, "#+5x")
self.assertRaises(ValueError, 1L .__format__, "+5#x")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0L .__format__, format_spec)
self.assertRaises(ValueError, 1L .__format__, format_spec)
self.assertRaises(ValueError, (-1L) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the long to a float
for format_spec in 'eEfFgG%':
for value in [0L, 1L, -1L, 100L, -100L, 1234567890L, -1234567890L]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456L, "0<20", '12345600000000000000')
test(123456L, "1<20", '12345611111111111111')
test(123456L, "*<20", '123456**************')
test(123456L, "0>20", '00000000000000123456')
test(123456L, "1>20", '11111111111111123456')
test(123456L, "*>20", '**************123456')
test(123456L, "0=20", '00000000000000123456')
test(123456L, "1=20", '11111111111111123456')
test(123456L, "*=20", '**************123456')
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_float__format__locale(self):
# test locale support for __format__ code 'n'
for i in range(-10, 10):
x = 1234567890.0 * (10.0 ** i)
self.assertEqual(locale.format('%g', x, grouping=True), format(x, 'n'))
self.assertEqual(locale.format('%.10g', x, grouping=True), format(x, '.10n'))
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_int__format__locale(self):
# test locale support for __format__ code 'n' for integers
x = 123456789012345678901234567890
for i in range(0, 30):
self.assertEqual(locale.format('%d', x, grouping=True), format(x, 'n'))
# move to the next integer to test
x = x // 10
rfmt = ">20n"
lfmt = "<20n"
cfmt = "^20n"
for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
def test_float__format__(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
def test(f, format_spec, result):
assert type(f) == float
assert type(format_spec) == str
self.assertEqual(f.__format__(format_spec), result)
self.assertEqual(f.__format__(unicode(format_spec)), result)
test(0.0, 'f', '0.000000')
# the default is 'g', except for empty format spec
test(0.0, '', '0.0')
test(0.01, '', '0.01')
test(0.01, 'g', '0.01')
# test for issue 3411
test(1.23, '1', '1.23')
test(-1.23, '1', '-1.23')
test(1.23, '1g', '1.23')
test(-1.23, '1g', '-1.23')
test( 1.0, ' g', ' 1')
test(-1.0, ' g', '-1')
test( 1.0, '+g', '+1')
test(-1.0, '+g', '-1')
test(1.1234e200, 'g', '1.1234e+200')
test(1.1234e200, 'G', '1.1234E+200')
test(1.0, 'f', '1.000000')
test(-1.0, 'f', '-1.000000')
test( 1.0, ' f', ' 1.000000')
test(-1.0, ' f', '-1.000000')
test( 1.0, '+f', '+1.000000')
test(-1.0, '+f', '-1.000000')
# Python versions <= 2.6 switched from 'f' to 'g' formatting for
# values larger than 1e50. No longer.
f = 1.1234e90
for fmt in 'f', 'F':
# don't do a direct equality check, since on some
# platforms only the first few digits of dtoa
# will be reliable
result = f.__format__(fmt)
self.assertEqual(len(result), 98)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
f = 1.1234e200
for fmt in 'f', 'F':
result = f.__format__(fmt)
self.assertEqual(len(result), 208)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
test( 1.0, 'e', '1.000000e+00')
test(-1.0, 'e', '-1.000000e+00')
test( 1.0, 'E', '1.000000E+00')
test(-1.0, 'E', '-1.000000E+00')
test(1.1234e20, 'e', '1.123400e+20')
test(1.1234e20, 'E', '1.123400E+20')
# No format code means use g, but must have a decimal
# and a number after the decimal. This is tricky, because
        # a totally empty format specifier means something else.
# So, just use a sign flag
test(1e200, '+g', '+1e+200')
test(1e200, '+', '+1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
# 0 padding
test(1234., '010f', '1234.000000')
test(1234., '011f', '1234.000000')
test(1234., '012f', '01234.000000')
test(-1234., '011f', '-1234.000000')
test(-1234., '012f', '-1234.000000')
test(-1234., '013f', '-01234.000000')
test(-1234.12341234, '013f', '-01234.123412')
test(-123456.12341234, '011.2f', '-0123456.12')
# issue 5782, commas with no specifier type
test(1.2, '010,.2', '0,000,001.2')
# 0 padding with commas
test(1234., '011,f', '1,234.000000')
test(1234., '012,f', '1,234.000000')
test(1234., '013,f', '01,234.000000')
test(-1234., '012,f', '-1,234.000000')
test(-1234., '013,f', '-1,234.000000')
test(-1234., '014,f', '-01,234.000000')
test(-12345., '015,f', '-012,345.000000')
test(-123456., '016,f', '-0,123,456.000000')
test(-123456., '017,f', '-0,123,456.000000')
test(-123456.12341234, '017,f', '-0,123,456.123412')
test(-123456.12341234, '013,.2f', '-0,123,456.12')
# % formatting
test(-1.0, '%', '-100.000000%')
# format spec must be string
self.assertRaises(TypeError, 3.0.__format__, None)
self.assertRaises(TypeError, 3.0.__format__, 0)
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# Alternate formatting is not supported
self.assertRaises(ValueError, format, 0.0, '#')
self.assertRaises(ValueError, format, 0.0, '#20f')
# Issue 6902
test(12345.6, "0<20", '12345.60000000000000')
test(12345.6, "1<20", '12345.61111111111111')
test(12345.6, "*<20", '12345.6*************')
test(12345.6, "0>20", '000000000000012345.6')
test(12345.6, "1>20", '111111111111112345.6')
test(12345.6, "*>20", '*************12345.6')
test(12345.6, "0=20", '000000000000012345.6')
test(12345.6, "1=20", '111111111111112345.6')
test(12345.6, "*=20", '*************12345.6')
def test_format_spec_errors(self):
# int, float, and string all share the same format spec
# mini-language parser.
# Check that we can't ask for too many digits. This is
# probably a CPython specific test. It tries to put the width
# into a C long.
self.assertRaises(ValueError, format, 0, '1'*10000 + 'd')
# Similar with the precision.
self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd')
# And may as well test both.
self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd')
# Make sure commas aren't allowed with various type codes
for code in 'xXobns':
self.assertRaises(ValueError, format, 0, ',' + code)
def test_internal_sizes(self):
self.assertGreater(object.__basicsize__, 0)
self.assertGreater(tuple.__itemsize__, 0)
def test_main():
with check_py3k_warnings(
("buffer.. not supported", DeprecationWarning),
("classic long division", DeprecationWarning)):
run_unittest(TypesTests)
if __name__ == '__main__':
test_main()
| mit |
TodoOrTODO/p4factory | targets/l2_switch/tests/ptf-tests/l2_switch.py | 7 | 2582 | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ptf.dataplane as dataplane
import pd_base_tests
from ptf.testutils import *
from ptf.thriftutils import *
from p4_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
def setup_default_table_configurations(client, sess_hdl, dev_tgt):
client.clean_all(sess_hdl, dev_tgt)
result = client.smac_set_default_action_mac_learn(sess_hdl, dev_tgt)
assert result == 0
result = client.dmac_set_default_action_broadcast(sess_hdl, dev_tgt)
assert result == 0
result = client.mcast_src_pruning_set_default_action__nop(sess_hdl, dev_tgt)
assert result == 0
def setup_pre(mc, sess_hdl, dev_tgt):
mgrp_hdl = mc.mc_mgrp_create(sess_hdl, dev_tgt.dev_id, 1)
port_map = [0] * 32
lag_map = [0] * 32
# port 1, port 2, port 3
port_map[0] = (1 << 1) | (1 << 2) | (1 << 3)
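    # i.e. port_map[0] == 0x0E: bits 1, 2 and 3 of the port bitmap select
    # ports 1-3 as members of the multicast group.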
node_hdl = mc.mc_node_create(sess_hdl, dev_tgt.dev_id, 0,
bytes_to_string(port_map),
bytes_to_string(lag_map))
mc.mc_associate_node(sess_hdl, dev_tgt.dev_id, mgrp_hdl, node_hdl)
class SimpleReplicationTest(pd_base_tests.ThriftInterfaceDataPlane):
def __init__(self):
pd_base_tests.ThriftInterfaceDataPlane.__init__(self, "l2_switch")
def runTest(self):
sess_hdl = self.conn_mgr.client_init(16)
dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
setup_default_table_configurations(self.client, sess_hdl, dev_tgt)
setup_pre(self.mc, sess_hdl, dev_tgt)
# 5 is instance_type for replicated packets
match_spec = l2_switch_mcast_src_pruning_match_spec_t(5)
self.client.mcast_src_pruning_table_add_with__drop(
sess_hdl, dev_tgt, match_spec
)
pkt = simple_ip_packet(ip_dst='10.0.0.2',
ip_id=101,
ip_ttl=64)
send_packet(self, 2, str(pkt))
exp_pkt = pkt
verify_packets(self, exp_pkt, [1, 3]) # port 2 should have been pruned
| apache-2.0 |
spandanb/horizon | openstack_dashboard/dashboards/router/nexus1000v/urls.py | 1 | 1211 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abishek Subramanian, Cisco Systems, Inc.
# @author: Sergey Sudakovich, Cisco Systems, Inc.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.router.nexus1000v import views
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
#Network Profile
url(r'^network_profile/create$', views.CreateNetworkProfileView.as_view(),
name='create_network_profile'),
url(r'^network_profile/(?P<profile_id>[^/]+)/update$',
views.UpdateNetworkProfileView.as_view(),
name='update_network_profile'),
)
| apache-2.0 |
hackupc/backend | app/hackathon_variables.py | 1 | 4066 | # HACKATHON PERSONALIZATION
import os
from django.utils import timezone
HACKATHON_NAME = 'HackAssistant'
# What's the name for the application
HACKATHON_APPLICATION_NAME = 'HackAssistant registration'
# Hackathon timezone
TIME_ZONE = 'MST'
# This description will be used on the html and sharing meta tags
HACKATHON_DESCRIPTION = 'HackAssistant is an organization to maintain ' \
                        'a few open-source projects related to hackathon management'
# Domain where application is deployed, can be set by env variable
HACKATHON_DOMAIN = os.environ.get('DOMAIN', None)
HEROKU_APP_NAME = os.environ.get('HEROKU_APP_NAME', None)
if HEROKU_APP_NAME and not HACKATHON_DOMAIN:
HACKATHON_DOMAIN = '%s.herokuapp.com' % HEROKU_APP_NAME
elif not HACKATHON_DOMAIN:
HACKATHON_DOMAIN = 'localhost:8000'
# Hackathon contact email: where should all hackers contact you. It will also be used as a sender for all emails
HACKATHON_CONTACT_EMAIL = '[email protected]'
# Hackathon logo url, will be used on all emails
HACKATHON_LOGO_URL = 'https://avatars2.githubusercontent.com/u/33712329?s=200&v=4'
HACKATHON_OG_IMAGE = 'https://hackcu.org/img/hackcu_ogimage870x442.png'
# (OPTIONAL) Track visits on your website
# HACKATHON_GOOGLE_ANALYTICS = 'UA-7777777-2'
# (OPTIONAL) Hackathon twitter user
HACKATHON_TWITTER_ACCOUNT = 'casassaez'
# (OPTIONAL) Hackathon Facebook page
HACKATHON_FACEBOOK_PAGE = 'casassaez'
# (OPTIONAL) Github Repo for this project (so meta)
HACKATHON_GITHUB_REPO = 'https://github.com/hackassistant/registration/'
# (OPTIONAL) Applications deadline
# HACKATHON_APP_DEADLINE = timezone.datetime(2018, 2, 24, 3, 14, tzinfo=timezone.pytz.timezone(TIME_ZONE))
# (OPTIONAL) When to arrive at the hackathon
HACKATHON_ARRIVE = 'Registration opens at 3:00 PM and closes at 6:00 PM on Friday October 13th, ' \
'the opening ceremony will be at 7:00 pm.'
# (OPTIONAL) When to arrive at the hackathon
HACKATHON_LEAVE = 'Closing ceremony will be held on Sunday October 15th from 3:00 PM to 5:00 PM. ' \
'However the projects demo fair will be held in the morning from 10:30 AM to 1 PM.'
# (OPTIONAL) Hackathon live page
# HACKATHON_LIVE_PAGE = 'https://gerard.space/live'
# (OPTIONAL) Regex to automatically match organizers emails and set them as organizers when signing up
REGEX_HACKATHON_ORGANIZER_EMAIL = '^.*@gerard\.space$'
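# e.g. '[email protected]' would match and be auto-promoted to organizer
# (illustrative address).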
# (OPTIONAL) Sends 500 errors to email whilst in production mode.
HACKATHON_DEV_EMAILS = []
# Reimbursement configuration
REIMBURSEMENT_ENABLED = True
CURRENCY = '$'
REIMBURSEMENT_EXPIRY_DAYS = 5
REIMBURSEMENT_REQUIREMENTS = 'You have to submit a project and demo it during the event in order to be reimbursed.'
REIMBURSEMENT_DEADLINE = timezone.datetime(2028, 2, 24, 3, 14, tzinfo=timezone.pytz.timezone(TIME_ZONE))
# (OPTIONAL) Max team members. Defaults to 4
TEAMS_ENABLED = True
HACKATHON_MAX_TEAMMATES = 4
# (OPTIONAL) Code of conduct link
# CODE_CONDUCT_LINK = "https://pages.hackcu.org/code_conduct/"
# (OPTIONAL) Slack credentials
# Highly recommended to create a separate user account to extract the token from
SLACK = {
'team': os.environ.get('SL_TEAM', 'test'),
# Get it here: https://api.slack.com/custom-integrations/legacy-tokens
'token': os.environ.get('SL_TOKEN', None)
}
# (OPTIONAL) Logged in cookie
# This allows to store an extra cookie in the browser to be shared with other application on the same domain
LOGGED_IN_COOKIE_DOMAIN = '.gerard.space'
LOGGED_IN_COOKIE_KEY = 'hackassistant_logged_in'
# Hardware configuration
HARDWARE_ENABLED = False
# Hardware request time length (in minutes)
HARDWARE_REQUEST_TIME = 15
# Can Hackers start a request on the hardware lab?
# HACKERS_CAN_REQUEST = False
# Enable dubious separate pipeline (disabled by default)
DUBIOUS_ENABLED = False
# Enable blacklist separate pipeline (disabled by default)
BLACKLIST_ENABLED = False
SUPPORTED_RESUME_EXTENSIONS = []
# Mentor/Volunteer applications can expire if they are invited, set to False to not
MENTOR_EXPIRES = False
VOLUNTEER_EXPIRES = False
| mit |
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/template_module.py | 1 | 2587 | """
Template module
"""
from string import Template
from lxml import etree
from xmodule.raw_module import RawDescriptor
from xmodule.x_module import DEPRECATION_VSCOMPAT_EVENT, XModule # lint-amnesty, pylint: disable=unused-import
class CustomTagModule(XModule):
"""
This module supports tags of the form
<customtag option="val" option2="val2" impl="tagname"/>
In this case, $tagname should refer to a file in data/custom_tags, which
contains a Python string.Template formatted template that uses ${option} and
${option2} for the content.
For instance:
data/mycourse/custom_tags/book::
More information given in <a href="/book/${page}">the text</a>
course.xml::
...
<customtag page="234" impl="book"/>
...
Renders to::
More information given in <a href="/book/234">the text</a>
"""
def get_html(self):
return self.descriptor.rendered_html
class CustomTagDescriptor(RawDescriptor):
""" Descriptor for custom tags. Loads the template when created."""
module_class = CustomTagModule
resources_dir = None
template_dir_name = 'customtag'
def render_template(self, system, xml_data):
'''Render the template, given the definition xml_data'''
xmltree = etree.fromstring(xml_data)
if 'impl' in xmltree.attrib:
template_name = xmltree.attrib['impl']
else:
# VS[compat] backwards compatibility with old nested customtag structure
child_impl = xmltree.find('impl')
if child_impl is not None:
template_name = child_impl.text
else:
# TODO (vshnayder): better exception type
raise Exception("Could not find impl attribute in customtag {0}"
.format(self.location))
params = dict(list(xmltree.items()))
# cdodge: look up the template as a module
template_loc = self.location.replace(category='custom_tag_template', name=template_name)
template_module = system.load_item(template_loc)
template_module_data = template_module.data
template = Template(template_module_data)
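        # safe_substitute (unlike substitute) leaves any ${...} placeholders
        # that are missing from params untouched instead of raising KeyError.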
return template.safe_substitute(params)
@property
def rendered_html(self):
return self.render_template(self.system, self.data)
def export_to_file(self):
"""
Custom tags are special: since they're already pointers, we don't want
to export them in a file with yet another layer of indirection.
"""
return False
| agpl-3.0 |
jdugge/QGIS | tests/src/python/test_qgspallabeling_base.py | 13 | 17945 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPalLabeling: base suite setup
From build dir, run: ctest -R PyQgsPalLabelingBase -V
See <qgis-src-dir>/tests/testdata/labeling/README.rst for description.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Larry Shaffer'
__date__ = '07/09/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import qgis # NOQA
import os
import sys
import datetime
import glob
import shutil
from collections.abc import Callable
from qgis.PyQt.QtCore import QSize, qDebug, Qt
from qgis.PyQt.QtGui import QFont, QColor
from qgis.core import (
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsDataSourceUri,
QgsGeometry,
QgsLabelingEngineSettings,
QgsProject,
QgsMapSettings,
QgsPalLabeling,
QgsPalLayerSettings,
QgsProviderRegistry,
QgsStringReplacementCollection,
QgsVectorLayer,
QgsVectorLayerSimpleLabeling,
QgsMultiRenderChecker,
QgsUnitTypes,
QgsVectorTileLayer,
QgsVectorTileBasicLabelingStyle,
QgsWkbTypes,
QgsVectorTileBasicLabeling
)
from qgis.testing import start_app, unittest
from qgis.testing.mocked import get_iface
from utilities import (
unitTestDataPath,
getTempfilePath,
renderMapToImage,
loadTestFonts,
getTestFont,
openInBrowserTab
)
start_app(sys.platform != 'darwin') # No cleanup on mac os x, it crashes the pallabelingcanvas test on exit
FONTSLOADED = loadTestFonts()
PALREPORT = 'PAL_REPORT' in os.environ
PALREPORTS = {}
# noinspection PyPep8Naming,PyShadowingNames
class TestQgsPalLabeling(unittest.TestCase):
_TestDataDir = unitTestDataPath()
_PalDataDir = os.path.join(_TestDataDir, 'labeling')
_TestFont = getTestFont() # Roman at 12 pt
""":type: QFont"""
_MapRegistry = None
""":type: QgsProject"""
_MapSettings = None
""":type: QgsMapSettings"""
_Canvas = None
""":type: QgsMapCanvas"""
_BaseSetup = False
@classmethod
def setUpClass(cls):
"""Run before all tests"""
# qgis iface
cls._Iface = get_iface()
cls._Canvas = cls._Iface.mapCanvas()
cls._TestFunction = ''
cls._TestGroup = ''
cls._TestGroupPrefix = ''
cls._TestGroupAbbr = ''
cls._TestGroupCanvasAbbr = ''
cls._TestImage = ''
cls._TestMapSettings = None
cls._Mismatch = 0
cls._Mismatches = dict()
cls._ColorTol = 0
cls._ColorTols = dict()
# initialize class MapRegistry, Canvas, MapRenderer, Map and PAL
# noinspection PyArgumentList
cls._MapRegistry = QgsProject.instance()
cls._MapSettings = cls.getBaseMapSettings()
osize = cls._MapSettings.outputSize()
cls._Canvas.resize(QSize(osize.width(), osize.height())) # necessary?
# set color to match render test comparisons background
cls._Canvas.setCanvasColor(cls._MapSettings.backgroundColor())
cls.setDefaultEngineSettings()
cls._BaseSetup = True
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
def setUp(self):
"""Run before each test."""
TestQgsPalLabeling.setDefaultEngineSettings()
self.lyr = self.defaultLayerSettings()
@classmethod
def setDefaultEngineSettings(cls):
"""Restore default settings for pal labeling"""
settings = QgsLabelingEngineSettings()
settings.setPlacementVersion(QgsLabelingEngineSettings.PlacementEngineVersion2)
cls._MapSettings.setLabelingEngineSettings(settings)
@classmethod
def removeAllLayers(cls):
cls._MapSettings.setLayers([])
cls._MapRegistry.removeAllMapLayers()
@classmethod
def removeMapLayer(cls, layer):
if layer is None:
return
lyr_id = layer.id()
cls._MapRegistry.removeMapLayer(lyr_id)
ms_layers = cls._MapSettings.layers()
if layer in ms_layers:
ms_layers.remove(layer)
cls._MapSettings.setLayers(ms_layers)
@classmethod
def getTestFont(cls):
return QFont(cls._TestFont)
@classmethod
def loadFeatureLayer(cls, table, chk=False):
if chk and cls._MapRegistry.mapLayersByName(table):
return
vlayer = QgsVectorLayer('{}/{}.geojson'.format(cls._PalDataDir, table), table, 'ogr')
assert vlayer.isValid()
# .qml should contain only style for symbology
vlayer.loadNamedStyle(os.path.join(cls._PalDataDir,
'{0}.qml'.format(table)))
# qDebug('render_lyr = {0}'.format(repr(vlayer)))
cls._MapRegistry.addMapLayer(vlayer)
# place new layer on top of render stack
render_lyrs = [vlayer]
render_lyrs.extend(cls._MapSettings.layers())
# qDebug('render_lyrs = {0}'.format(repr(render_lyrs)))
cls._MapSettings.setLayers(render_lyrs)
# zoom to aoi
cls._MapSettings.setExtent(cls.aoiExtent())
cls._Canvas.zoomToFullExtent()
return vlayer
@classmethod
def aoiExtent(cls):
"""Area of interest extent, which matches output aspect ratio"""
aoilayer = QgsVectorLayer('{}/aoi.geojson'.format(cls._PalDataDir), 'aoi', 'ogr')
assert aoilayer.isValid()
return aoilayer.extent()
@classmethod
def getBaseMapSettings(cls):
"""
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
# default for labeling test data: WGS 84 / UTM zone 13N
crs = QgsCoordinateReferenceSystem('epsg:32613')
ms.setBackgroundColor(QColor(152, 219, 249))
ms.setOutputSize(QSize(420, 280))
ms.setOutputDpi(72)
ms.setFlag(QgsMapSettings.Antialiasing, True)
ms.setFlag(QgsMapSettings.UseAdvancedEffects, False)
ms.setFlag(QgsMapSettings.ForceVectorOutput, False) # no caching?
ms.setDestinationCrs(crs)
ms.setExtent(cls.aoiExtent())
return ms
def cloneMapSettings(self, oms):
"""
:param QgsMapSettings oms: Other QgsMapSettings
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
ms.setBackgroundColor(oms.backgroundColor())
ms.setOutputSize(oms.outputSize())
ms.setOutputDpi(oms.outputDpi())
ms.setFlags(oms.flags())
ms.setDestinationCrs(oms.destinationCrs())
ms.setExtent(oms.extent())
ms.setOutputImageFormat(oms.outputImageFormat())
ms.setLabelingEngineSettings(oms.labelingEngineSettings())
ms.setLayers(oms.layers())
return ms
def configTest(self, prefix, abbr):
"""Call in setUp() function of test subclass"""
self._TestGroupPrefix = prefix
self._TestGroupAbbr = abbr
# insert test's Class.function marker into debug output stream
# this helps visually track down the start of a test's debug output
testid = self.id().split('.')
self._TestGroup = testid[1]
self._TestFunction = testid[2]
testheader = '\n#####_____ {0}.{1} _____#####\n'.\
format(self._TestGroup, self._TestFunction)
qDebug(testheader)
# define the shorthand name of the test (to minimize file name length)
self._Test = '{0}_{1}'.format(self._TestGroupAbbr,
self._TestFunction.replace('test_', ''))
def defaultLayerSettings(self):
lyr = QgsPalLayerSettings()
lyr.fieldName = 'text' # default in test data sources
font = self.getTestFont()
font.setPointSize(32)
format = lyr.format()
format.setFont(font)
format.setNamedStyle('Roman')
format.setSize(32)
format.setSizeUnit(QgsUnitTypes.RenderPoints)
format.buffer().setJoinStyle(Qt.BevelJoin)
lyr.setFormat(format)
return lyr
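    # Illustrative sketch (not part of the original suite): concrete test
    # classes typically load a layer, tweak the settings returned above and
    # compare the rendering against a control image, roughly:
    #
    #   layer = self.loadFeatureLayer('point')
    #   lyr = self.defaultLayerSettings()
    #   lyr.placement = QgsPalLayerSettings.OverPoint   # hypothetical tweak
    #   layer.setLabeling(QgsVectorLayerSimpleLabeling(lyr))
    #   self.saveControlImage()
    #   self.assertTrue(*self.renderCheck(mismatch=0))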
@staticmethod
def settingsDict(lyr):
"""Return a dict of layer-level labeling settings
.. note:: QgsPalLayerSettings is not a QObject, so we can not collect
current object properties, and the public properties of the C++ obj
        can't be listed with __dict__ or vars(). So, we sniff them out based
        on their naming convention (camelCase), as reported by dir().
"""
res = {}
for attr in dir(lyr):
if attr[0].islower() and not attr.startswith("__"):
value = getattr(lyr, attr)
if isinstance(value, (QgsGeometry, QgsStringReplacementCollection, QgsCoordinateTransform)):
continue # ignore these objects
if not isinstance(value, Callable):
res[attr] = value
return res
def controlImagePath(self, grpprefix=''):
if not grpprefix:
grpprefix = self._TestGroupPrefix
return os.path.join(self._TestDataDir, 'control_images',
'expected_' + grpprefix,
self._Test, self._Test + '.png')
def saveControlImage(self, tmpimg=''):
# don't save control images for RenderVsOtherOutput (Vs) tests, since
# those control images belong to a different test result
if ('PAL_CONTROL_IMAGE' not in os.environ
or 'Vs' in self._TestGroup):
return
imgpath = self.controlImagePath()
testdir = os.path.dirname(imgpath)
if not os.path.exists(testdir):
os.makedirs(testdir)
imgbasepath = \
os.path.join(testdir,
os.path.splitext(os.path.basename(imgpath))[0])
# remove any existing control images
for f in glob.glob(imgbasepath + '.*'):
if os.path.exists(f):
os.remove(f)
qDebug('Control image for {0}.{1}'.format(self._TestGroup,
self._TestFunction))
if not tmpimg:
# TODO: this can be deprecated, when per-base-test-class rendering
# in checkTest() is verified OK for all classes
qDebug('Rendering control to: {0}'.format(imgpath))
ms = self._MapSettings # class settings
""":type: QgsMapSettings"""
settings_type = 'Class'
if self._TestMapSettings is not None:
ms = self._TestMapSettings # per test settings
settings_type = 'Test'
qDebug('MapSettings type: {0}'.format(settings_type))
img = renderMapToImage(ms, parallel=False)
""":type: QImage"""
tmpimg = getTempfilePath('png')
if not img.save(tmpimg, 'png'):
os.unlink(tmpimg)
raise OSError('Control not created for: {0}'.format(imgpath))
if tmpimg and os.path.exists(tmpimg):
qDebug('Copying control to: {0}'.format(imgpath))
shutil.copyfile(tmpimg, imgpath)
else:
raise OSError('Control not copied to: {0}'.format(imgpath))
def renderCheck(self, mismatch=0, colortol=0, imgpath='', grpprefix=''):
"""Check rendered map canvas or existing image against control image
:mismatch: number of pixels different from control, and still valid
:colortol: maximum difference for each color component including alpha
:imgpath: existing image; if present, skips rendering canvas
:grpprefix: compare test image/rendering against different test group
"""
if not grpprefix:
grpprefix = self._TestGroupPrefix
chk = QgsMultiRenderChecker()
chk.setControlPathPrefix('expected_' + grpprefix)
chk.setControlName(self._Test)
if imgpath:
chk.setRenderedImage(imgpath)
ms = self._MapSettings # class settings
if self._TestMapSettings is not None:
ms = self._TestMapSettings # per test settings
chk.setMapSettings(ms)
chk.setColorTolerance(colortol)
# noinspection PyUnusedLocal
res = chk.runTest(self._Test, mismatch)
if PALREPORT and not res: # don't report OK checks
testname = self._TestGroup + ' . ' + self._Test
PALREPORTS[testname] = chk.report()
msg = '\nRender check failed for "{0}"'.format(self._Test)
return res, msg
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
def testSplitToLines(self):
self.assertEqual(QgsPalLabeling.splitToLines('', ''), [''])
self.assertEqual(QgsPalLabeling.splitToLines('abc def', ''), ['abc def'])
self.assertEqual(QgsPalLabeling.splitToLines('abc def', ' '), ['abc', 'def'])
self.assertEqual(QgsPalLabeling.splitToLines('abc\ndef', ' '), ['abc', 'def'])
class TestPALConfig(TestQgsPalLabeling):
@classmethod
def setUpClass(cls):
TestQgsPalLabeling.setUpClass()
cls.layer = TestQgsPalLabeling.loadFeatureLayer('point')
@classmethod
def tearDownClass(cls):
cls.removeMapLayer(cls.layer)
def setUp(self):
"""Run before each test."""
self.configTest('pal_base', 'base')
def tearDown(self):
"""Run after each test."""
pass
def test_default_pal_disabled(self):
# Verify PAL labeling is disabled for layer by default
palset = self.layer.customProperty('labeling', '')
msg = '\nExpected: Empty string\nGot: {0}'.format(palset)
self.assertEqual(palset, '', msg)
def test_settings_no_labeling(self):
self.layer.setLabeling(None)
self.assertEqual(None, self.layer.labeling())
def test_layer_pal_activated(self):
# Verify, via engine, that PAL labeling can be activated for layer
lyr = self.defaultLayerSettings()
self.layer.setLabeling(QgsVectorLayerSimpleLabeling(lyr))
msg = '\nLayer labeling not activated, as reported by labelingEngine'
self.assertTrue(QgsPalLabeling.staticWillUseLayer(self.layer), msg)
# also test for vector tile layer
tile_layer = QgsVectorTileLayer('x', 'y')
self.assertFalse(QgsPalLabeling.staticWillUseLayer(tile_layer))
st = QgsVectorTileBasicLabelingStyle()
st.setStyleName("st1")
st.setLayerName("place")
st.setFilterExpression("rank = 1 AND class = 'country'")
st.setGeometryType(QgsWkbTypes.PointGeometry)
labeling = QgsVectorTileBasicLabeling()
labeling.setStyles([st])
tile_layer.setLabeling(labeling)
self.assertTrue(QgsPalLabeling.staticWillUseLayer(tile_layer))
def test_write_read_settings(self):
# Verify written PAL settings are same when read from layer
# load and write default test settings
lyr1 = self.defaultLayerSettings()
lyr1dict = self.settingsDict(lyr1)
# print(lyr1dict)
self.layer.setLabeling(QgsVectorLayerSimpleLabeling(lyr1))
# read settings
lyr2 = self.layer.labeling().settings()
lyr2dict = self.settingsDict(lyr2)
# print(lyr2dict)
msg = '\nLayer settings read not same as settings written'
self.assertDictEqual(lyr1dict, lyr2dict, msg)
def test_default_partials_labels_enabled(self):
# Verify ShowingPartialsLabels is enabled for PAL by default
engine_settings = QgsLabelingEngineSettings()
self.assertTrue(engine_settings.testFlag(QgsLabelingEngineSettings.UsePartialCandidates))
def test_partials_labels_activate(self):
engine_settings = QgsLabelingEngineSettings()
# Enable partials labels
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates)
self.assertTrue(engine_settings.testFlag(QgsLabelingEngineSettings.UsePartialCandidates))
def test_partials_labels_deactivate(self):
engine_settings = QgsLabelingEngineSettings()
# Disable partials labels
engine_settings.setFlag(QgsLabelingEngineSettings.UsePartialCandidates, False)
self.assertFalse(engine_settings.testFlag(QgsLabelingEngineSettings.UsePartialCandidates))
# noinspection PyPep8Naming,PyShadowingNames
def runSuite(module, tests):
"""This allows for a list of test names to be selectively run.
    Also ensures unittest verbose output comes at the end, after debug output."""
loader = unittest.defaultTestLoader
if 'PAL_SUITE' in os.environ:
if tests:
suite = loader.loadTestsFromNames(tests, module)
else:
raise Exception(
"\n\n####__ 'PAL_SUITE' set, but no tests specified __####\n")
else:
suite = loader.loadTestsFromModule(module)
verb = 2 if 'PAL_VERBOSE' in os.environ else 0
res = unittest.TextTestRunner(verbosity=verb).run(suite)
if PALREPORTS:
teststamp = 'PAL Test Report: ' + \
datetime.datetime.now().strftime('%Y-%m-%d %X')
report = '<html><head><title>{0}</title></head><body>'.format(teststamp)
report += '\n<h2>Failed Tests: {0}</h2>'.format(len(PALREPORTS))
for k, v in list(PALREPORTS.items()):
report += '\n<h3>{0}</h3>\n{1}'.format(k, v)
report += '</body></html>'
tmp_name = getTempfilePath('html')
with open(tmp_name, 'wt') as report_file:
report_file.write(report)
openInBrowserTab('file://' + tmp_name)
return res
if __name__ == '__main__':
# NOTE: unless PAL_SUITE env var is set all test class methods will be run
# ex: 'TestGroup(Point|Line|Curved|Polygon|Feature).test_method'
suite = [
'TestPALConfig.test_write_read_settings'
]
res = runSuite(sys.modules[__name__], suite)
sys.exit(not res.wasSuccessful())
| gpl-2.0 |
jroyal/plexpy | lib/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
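# Illustrative usage sketch (assumes the optional 'datrie' package is
# installed; values below are hypothetical):
#
#   t = Trie({"foo": 1, "foobar": 2})
#   "foo" in t                     # True
#   t.keys("foo")                  # ["foo", "foobar"] (order as per datrie)
#   t.has_keys_with_prefix("fo")   # True
#   t.longest_prefix("foobaz")     # "foo"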
| gpl-3.0 |
HKUST-SING/tensorflow | tensorflow/python/util/nest.py | 24 | 17585 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module is used to perform any operations on nested structures, which can be
specified as sequences that contain non-sequence elements or other sequences.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e. no references in the structure of the input of these functions
should be recursive.
@@assert_same_structure
@@is_sequence
@@flatten
@@flatten_dict_items
@@pack_sequence_as
@@map_structure
@@assert_shallow_structure
@@flatten_up_to
@@map_structure_up_to
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, or a `namedtuple` class.
args: elements to be converted to a sequence.
Returns:
`args` with the type of `instance`.
"""
if (isinstance(instance, tuple) and
hasattr(instance, "_fields") and
isinstance(instance._fields, collections.Sequence) and
all(isinstance(f, six.string_types) for f in instance._fields)):
# This is a namedtuple
return type(instance)(*args)
else:
# Not a namedtuple
return type(instance)(args)
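# Illustrative behaviour of the helper above (not part of the original module):
#
#   Point = collections.namedtuple("Point", ["x", "y"])
#   _sequence_like(Point(1, 2), [3, 4])   # -> Point(x=3, y=4)
#   _sequence_like([1, 2], [3, 4])        # -> [3, 4]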
def _yield_flat_nest(nest):
for n in nest:
if is_sequence(n):
for ni in _yield_flat_nest(n):
yield ni
else:
yield n
def is_sequence(seq):
"""Returns a true if its input is a collections.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
    True if the input is not a string and is a collections.Sequence.
"""
return (isinstance(seq, collections.Sequence)
and not isinstance(seq, six.string_types))
def flatten(nest):
"""Returns a flat sequence from a given nested structure.
If `nest` is not a sequence, this returns a single-element list: `[nest]`.
Args:
nest: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the flattened version of the input.
"""
return list(_yield_flat_nest(nest)) if is_sequence(nest) else [nest]
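# Illustrative examples (not part of the original module), showing how
# `flatten` treats nested sequences versus scalars:
#
#   flatten([[1, 2], [3, [4, 5]]])   # -> [1, 2, 3, 4, 5]
#   flatten("hello")                 # -> ["hello"]  (strings count as scalars)
#   flatten(0)                       # -> [0]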
def _recursive_assert_same_structure(nest1, nest2):
is_sequence_nest1 = is_sequence(nest1)
if is_sequence_nest1 != is_sequence(nest2):
raise ValueError(
"The two structures don't have the same nested structure. "
"First structure: %s, second structure: %s." % (nest1, nest2))
if is_sequence_nest1:
type_nest1 = type(nest1)
type_nest2 = type(nest2)
if type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2))
for n1, n2 in zip(nest1, nest2):
_recursive_assert_same_structure(n1, n2)
def assert_same_structure(nest1, nest2):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures.
"""
len_nest1 = len(flatten(nest1)) if is_sequence(nest1) else 1
len_nest2 = len(flatten(nest2)) if is_sequence(nest2) else 1
if len_nest1 != len_nest2:
raise ValueError("The two structures don't have the same number of "
"elements. First structure: %s, second structure: %s."
% (nest1, nest2))
_recursive_assert_same_structure(nest1, nest2)
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
    ValueError: If any key and value do not have the same structure, or if keys are
not unique.
"""
if not isinstance(dictionary, dict):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in structure:
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(_sequence_like(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not is_sequence(flat_sequence):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return _sequence_like(structure, packed)
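# Illustrative round trip (not part of the original module): for a fixed
# structure, flatten and pack_sequence_as are inverses of each other:
#
#   structure = (1, (2, 3), [4])
#   flatten(structure)                          # -> [1, 2, 3, 4]
#   pack_sequence_as(structure, [5, 6, 7, 8])   # -> (5, (6, 7), [8])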
def map_structure(func, *structure):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
    func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or list of constructed scalars and/or other
tuples/lists, or scalars. Note: numpy arrays are considered scalars.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
for other in structure[1:]:
assert_same_structure(structure[0], other)
flat_structure = [flatten(s) for s in structure]
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
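# Illustrative example (not part of the original module): applying a binary
# function across two structures with identical nesting:
#
#   map_structure(lambda a, b: a + b, (1, [2, 3]), (10, [20, 30]))
#   # -> (11, [22, 33])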
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flatten output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = [flatten_up_to(shallow_tree, input_tree)
for input_tree in inputs]
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
| apache-2.0 |
scrollback/kuma | vendor/packages/nose/nose/plugins/prof.py | 106 | 5357 | """This plugin will run tests using the hotshot profiler, which is part
of the standard library. To turn it on, use the ``--with-profile`` option
or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
and the profiler output file may be changed with ``--profile-stats-file``.
See the `hotshot documentation`_ in the standard library documentation for
more details on the various output options.
.. _hotshot documentation: http://docs.python.org/library/hotshot.html
"""
try:
import hotshot
from hotshot import stats
except ImportError:
hotshot, stats = None, None
import logging
import os
import sys
import tempfile
from nose.plugins.base import Plugin
from nose.util import tolist
log = logging.getLogger('nose.plugins')
class Profile(Plugin):
"""
Use this plugin to run tests using the hotshot profiler.
"""
pfile = None
clean_stats_file = False
def options(self, parser, env):
"""Register commandline options.
"""
if not self.available():
return
Plugin.options(self, parser, env)
parser.add_option('--profile-sort', action='store', dest='profile_sort',
default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
metavar="SORT",
help="Set sort order for profiler output")
parser.add_option('--profile-stats-file', action='store',
dest='profile_stats_file',
metavar="FILE",
default=env.get('NOSE_PROFILE_STATS_FILE'),
help='Profiler stats file; default is a new '
'temp file on each run')
parser.add_option('--profile-restrict', action='append',
dest='profile_restrict',
metavar="RESTRICT",
default=env.get('NOSE_PROFILE_RESTRICT'),
help="Restrict profiler output. See help for "
"pstats.Stats for details")
def available(cls):
return hotshot is not None
available = classmethod(available)
def begin(self):
"""Create profile stats file and load profiler.
"""
if not self.available():
return
self._create_pfile()
self.prof = hotshot.Profile(self.pfile)
def configure(self, options, conf):
"""Configure plugin.
"""
if not self.available():
self.enabled = False
return
Plugin.configure(self, options, conf)
self.conf = conf
if options.profile_stats_file:
self.pfile = options.profile_stats_file
self.clean_stats_file = False
else:
self.pfile = None
self.clean_stats_file = True
self.fileno = None
self.sort = options.profile_sort
self.restrict = tolist(options.profile_restrict)
def prepareTest(self, test):
"""Wrap entire test run in :func:`prof.runcall`.
"""
if not self.available():
return
log.debug('preparing test %s' % test)
def run_and_profile(result, prof=self.prof, test=test):
self._create_pfile()
prof.runcall(test, result)
return run_and_profile
def report(self, stream):
"""Output profiler report.
"""
log.debug('printing profiler report')
self.prof.close()
prof_stats = stats.load(self.pfile)
prof_stats.sort_stats(self.sort)
# 2.5 has completely different stream handling from 2.4 and earlier.
# Before 2.5, stats objects have no stream attribute; in 2.5 and later
        # a reference to sys.stdout is stored before we can tweak it.
compat_25 = hasattr(prof_stats, 'stream')
if compat_25:
tmp = prof_stats.stream
prof_stats.stream = stream
else:
tmp = sys.stdout
sys.stdout = stream
try:
if self.restrict:
log.debug('setting profiler restriction to %s', self.restrict)
prof_stats.print_stats(*self.restrict)
else:
prof_stats.print_stats()
finally:
if compat_25:
prof_stats.stream = tmp
else:
sys.stdout = tmp
def finalize(self, result):
"""Clean up stats file, if configured to do so.
"""
if not self.available():
return
try:
self.prof.close()
except AttributeError:
# TODO: is this trying to catch just the case where not
# hasattr(self.prof, "close")? If so, the function call should be
# moved out of the try: suite.
pass
if self.clean_stats_file:
if self.fileno:
try:
os.close(self.fileno)
except OSError:
pass
try:
os.unlink(self.pfile)
except OSError:
pass
return None
def _create_pfile(self):
if not self.pfile:
self.fileno, self.pfile = tempfile.mkstemp()
self.clean_stats_file = True
| mpl-2.0 |