repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
hfp/tensorflow-xsmm | tensorflow/python/util/tf_stack.py | 25 | 3807 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions used to extract and analyze stacks. Faster than Python libs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import linecache
import sys
# Names for indices into TF traceback tuples.
TB_FILENAME = 0
TB_LINENO = 1
TB_FUNCNAME = 2
TB_CODEDICT = 3 # Dictionary of Python interpreter state.
def extract_stack(extract_frame_info_fn=None):
"""A lightweight, extensible re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Args:
extract_frame_info_fn: Optional callable fn(stack_frame) applied to each
stack frame. This callable's return value is stored as the sixth (last)
element of the returned tuples. If not provided, the returned tuples
will have None as their sixth value.
Returns:
A list of 6-tuples
(filename, lineno, name, frame_globals, func_start_lineno, custom_info)
corresponding to the call stack of the current thread. The returned tuples
have the innermost stack frame at the end, unlike the Python inspect
module's stack() function.
"""
default_fn = lambda f: None
extract_frame_info_fn = extract_frame_info_fn or default_fn
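# Raising and catching an exception here is a portable way to grab the
# current frame: the traceback's tb_frame is this function's own frame, and
# f_back skips extract_stack itself so the walk starts at the caller.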
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
func_start_lineno = co.co_firstlineno
frame_info = extract_frame_info_fn(f)
ret.append((filename, lineno, name, frame_globals, func_start_lineno,
frame_info))
f = f.f_back
ret.reverse()
return ret
def convert_stack(stack, include_func_start_lineno=False):
"""Converts a stack extracted using extract_stack() to a traceback stack.
Args:
stack: A list of n 6-tuples,
(filename, lineno, name, frame_globals, func_start_lineno, custom_info),
as returned by extract_stack(); the custom_info element is ignored here.
include_func_start_lineno: True if function start line number should be
included as the 5th entry in return tuples.
Returns:
A list of n 4-tuples or 5-tuples
(filename, lineno, name, code, [optional: func_start_lineno]), where the
code tuple element is calculated from the corresponding elements of the
input tuple.
"""
ret = []
for (filename, lineno, name, frame_globals, func_start_lineno,
unused_frame_info) in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
if include_func_start_lineno:
ret.append((filename, lineno, name, line, func_start_lineno))
else:
ret.append((filename, lineno, name, line))
return ret
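# Illustrative usage sketch (not part of the original module): extract the
# stack cheaply, then convert it only when formatting is actually needed.
if __name__ == '__main__':
    stack = extract_stack()
    for filename, lineno, name, line in convert_stack(stack):
        print('%s:%d in %s: %s' % (filename, lineno, name, line or '<no source>'))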
| apache-2.0 |
spallavolu/scikit-learn | sklearn/utils/sparsefuncs.py | 220 | 11424 |
# Authors: Manoj Kumar
# Thomas Unterthiner
# License: BSD 3 clause
import scipy.sparse as sp
import numpy as np
from .fixes import sparse_min_max, bincount
from .sparsefuncs_fast import csr_mean_variance_axis0 as _csr_mean_var_axis0
from .sparsefuncs_fast import csc_mean_variance_axis0 as _csc_mean_var_axis0
def _raise_typeerror(X):
"""Raises a TypeError if X is not a CSR or CSC matrix"""
input_type = X.format if sp.issparse(X) else type(X)
err = "Expected a CSR or CSC sparse matrix, got %s." % input_type
raise TypeError(err)
def inplace_csr_column_scale(X, scale):
"""Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by a caller-provided per-feature
factor, assuming an (n_samples, n_features) shape.
Parameters
----------
X : CSR matrix with shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : float array with shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[1]
X.data *= scale.take(X.indices, mode='clip')
def inplace_csr_row_scale(X, scale):
""" Inplace row scaling of a CSR matrix.
Scale each sample of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR sparse matrix, shape (n_samples, n_features)
Matrix to be scaled.
scale : float array with shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[0]
X.data *= np.repeat(scale, np.diff(X.indptr))
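# Illustrative note (not in the original file): for a CSR matrix with
# indptr == [0, 2, 3], np.diff(X.indptr) == [2, 1] is the number of stored
# values per row, so np.repeat(scale, np.diff(X.indptr)) yields one factor
# per stored value, aligned with X.data.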
def mean_variance_axis(X, axis):
"""Compute mean and variance along axis 0 on a CSR or CSC matrix
Parameters
----------
X: CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis: int (either 0 or 1)
Axis along which the mean and variance are computed.
Returns
-------
means: float array with shape (n_features,) if axis == 0, else (n_samples,)
Feature-wise (axis=0) or sample-wise (axis=1) means
variances: float array with shape (n_features,) if axis == 0, else (n_samples,)
Feature-wise (axis=0) or sample-wise (axis=1) variances
"""
if axis not in (0, 1):
raise ValueError(
"Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by a caller-provided per-feature
factor, assuming an (n_samples, n_features) shape.
Parameters
----------
X: CSC or CSR matrix with shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale: float array with shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_row_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
def inplace_row_scale(X, scale):
""" Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Matrix to be scaled.
scale : float array with shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_column_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_row_scale(X, scale)
else:
_raise_typeerror(X)
def inplace_swap_row_csc(X, m, n):
"""
Swaps two rows of a CSC matrix in-place.
Parameters
----------
X: scipy.sparse.csc_matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
def inplace_swap_row_csr(X, m, n):
"""
Swaps two rows of a CSR matrix in-place.
Parameters
----------
X: scipy.sparse.csr_matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
# The following swapping makes life easier since m is assumed to be the
# smaller integer below.
if m > n:
m, n = n, m
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
# Modify indptr first
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start],
X.indices[n_start:n_stop],
X.indices[m_stop:n_start],
X.indices[m_start:m_stop],
X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start],
X.data[n_start:n_stop],
X.data[m_stop:n_start],
X.data[m_start:m_stop],
X.data[n_stop:]])
def inplace_swap_row(X, m, n):
"""
Swaps two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
return inplace_swap_row_csc(X, m, n)
elif isinstance(X, sp.csr_matrix):
return inplace_swap_row_csr(X, m, n)
else:
_raise_typeerror(X)
def inplace_swap_column(X, m, n):
"""
Swaps two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
Matrix whose two columns are to be swapped.
m: int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
return inplace_swap_row_csr(X, m, n)
elif isinstance(X, sp.csr_matrix):
return inplace_swap_row_csc(X, m, n)
else:
_raise_typeerror(X)
def min_max_axis(X, axis):
"""Compute minimum and maximum along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis: int (either 0 or 1)
Axis along which the minima and maxima are computed.
Returns
-------
mins: float array with shape (n_features,) if axis == 0, else (n_samples,)
Feature-wise (axis=0) or sample-wise (axis=1) minima
maxs: float array with shape (n_features,) if axis == 0, else (n_samples,)
Feature-wise (axis=0) or sample-wise (axis=1) maxima
"""
if isinstance(X, (sp.csr_matrix, sp.csc_matrix)):
return sparse_min_max(X, axis=axis)
else:
_raise_typeerror(X)
def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : CSR sparse matrix, shape = (n_samples, n_labels)
Input data.
axis : None, 0 or 1
The axis on which the data is aggregated.
sample_weight : array, shape = (n_samples,), optional
Weight for each row of X.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != 'csr':
raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))
# We rely here on the fact that np.diff(X.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over X.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
return out
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return bincount(X.indices, minlength=X.shape[1],
weights=weights)
else:
raise ValueError('Unsupported axis: {0}'.format(axis))
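# Illustrative example (not in the original file): for the CSR matrix
# [[1, 0], [1, 1]], count_nonzero(X, axis=1) == [1, 2] (nonzeros per row)
# and count_nonzero(X, axis=0) == [2, 1] (nonzeros per column).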
def _get_median(data, n_zeros):
"""Compute the median of data with n_zeros additional zeros.
This function is used to support sparse matrices; it modifies data in-place
"""
n_elems = len(data) + n_zeros
if not n_elems:
return np.nan
n_negative = np.count_nonzero(data < 0)
middle, is_odd = divmod(n_elems, 2)
data.sort()
if is_odd:
return _get_elem_at_rank(middle, data, n_negative, n_zeros)
return (_get_elem_at_rank(middle - 1, data, n_negative, n_zeros) +
_get_elem_at_rank(middle, data, n_negative, n_zeros)) / 2.
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros]
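# Worked example (not in the original file): data == [-1, 2] with
# n_zeros == 3 represents the multiset {-1, 0, 0, 0, 2}; n_elems == 5,
# middle == 2, and the element at rank 2 (one negative, three zeros) is 0,
# so _get_median returns 0.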
def csc_median_axis_0(X):
"""Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : CSC sparse matrix, shape (n_samples, n_features)
Input data.
Returns
-------
median : ndarray, shape (n_features,)
Median.
"""
if not isinstance(X, sp.csc_matrix):
raise TypeError("Expected matrix of CSC format, got %s" % X.format)
indptr = X.indptr
n_samples, n_features = X.shape
median = np.zeros(n_features)
for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):
# Prevent modifying X in place
data = np.copy(X.data[start: end])
nz = n_samples - data.size
median[f_ind] = _get_median(data, nz)
return median
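# Illustrative usage sketch (not part of the original module; kept as a
# comment because the relative imports above prevent running this file as a
# script):
#
#     X = sp.csr_matrix(np.array([[1., 0., 2.], [0., 3., 0.]]))
#     means, variances = mean_variance_axis(X, axis=0)  # per-feature stats
#     inplace_column_scale(X, np.array([1., 2., 0.5]))  # modifies X.data
#     print(means, variances, X.toarray())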
| bsd-3-clause |
rklabs/scrapy | scrapy/downloadermiddlewares/cookies.py | 128 | 3330 |
import os
import six
import logging
from collections import defaultdict
from scrapy.exceptions import NotConfigured
from scrapy.http import Response
from scrapy.http.cookies import CookieJar
from scrapy.utils.python import to_native_str
logger = logging.getLogger(__name__)
class CookiesMiddleware(object):
"""This middleware enables working with sites that need cookies"""
def __init__(self, debug=False):
self.jars = defaultdict(CookieJar)
self.debug = debug
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('COOKIES_ENABLED'):
raise NotConfigured
return cls(crawler.settings.getbool('COOKIES_DEBUG'))
def process_request(self, request, spider):
if request.meta.get('dont_merge_cookies', False):
return
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
cookies = self._get_request_cookies(jar, request)
for cookie in cookies:
jar.set_cookie_if_ok(cookie, request)
# set Cookie header
request.headers.pop('Cookie', None)
jar.add_cookie_header(request)
self._debug_cookie(request, spider)
def process_response(self, request, response, spider):
if request.meta.get('dont_merge_cookies', False):
return response
# extract cookies from Set-Cookie and drop invalid/expired cookies
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
jar.extract_cookies(response, request)
self._debug_set_cookie(response, spider)
return response
def _debug_cookie(self, request, spider):
if self.debug:
cl = [to_native_str(c, errors='replace')
for c in request.headers.getlist('Cookie')]
if cl:
cookies = "\n".join("Cookie: {}\n".format(c) for c in cl)
msg = "Sending cookies to: {}\n{}".format(request, cookies)
logger.debug(msg, extra={'spider': spider})
def _debug_set_cookie(self, response, spider):
if self.debug:
cl = [to_native_str(c, errors='replace')
for c in response.headers.getlist('Set-Cookie')]
if cl:
cookies = "\n".join("Set-Cookie: {}\n".format(c) for c in cl)
msg = "Received cookies from: {}\n{}".format(response, cookies)
logger.debug(msg, extra={'spider': spider})
def _format_cookie(self, cookie):
# build cookie string
cookie_str = '%s=%s' % (cookie['name'], cookie['value'])
if cookie.get('path', None):
cookie_str += '; Path=%s' % cookie['path']
if cookie.get('domain', None):
cookie_str += '; Domain=%s' % cookie['domain']
return cookie_str
def _get_request_cookies(self, jar, request):
if isinstance(request.cookies, dict):
cookie_list = [{'name': k, 'value': v}
for k, v in six.iteritems(request.cookies)]
else:
cookie_list = request.cookies
cookies = [self._format_cookie(x) for x in cookie_list]
headers = {'Set-Cookie': cookies}
response = Response(request.url, headers=headers)
return jar.make_cookies(response, request)
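# Illustrative example (not part of the original module): _format_cookie
# renders a cookie dict as a Set-Cookie style string, e.g.
#     _format_cookie({'name': 'sid', 'value': '1', 'path': '/'})
# returns 'sid=1; Path=/'.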
| bsd-3-clause |
VikingDen/jira | docs/conf.py | 8 | 7887 |
# -*- coding: utf-8 -*-
#
# JIRA Python Client documentation build configuration file, created by
# sphinx-quickstart on Thu May 3 17:01:50 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'jira-python'
copyright = u'2012, Atlassian Pty Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from jira.version import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jirapythondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'jirapython.tex', u'jira-python Documentation',
u'Atlassian Pty Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jirapython', u'jira-python Documentation',
[u'Atlassian Pty Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jirapython', u'jira-python Documentation',
u'Atlassian Pty Ltd.', 'jirapython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-2-clause |
beck/django | tests/template_tests/filter_tests/test_slugify.py | 324 | 1430 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import slugify
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class SlugifyTests(SimpleTestCase):
"""
Running slugify on a pre-escaped string leads to odd behavior,
but the result is still safe.
"""
@setup({'slugify01': '{% autoescape off %}{{ a|slugify }} {{ b|slugify }}{% endautoescape %}'})
def test_slugify01(self):
output = self.engine.render_to_string('slugify01', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a-b a-amp-b')
@setup({'slugify02': '{{ a|slugify }} {{ b|slugify }}'})
def test_slugify02(self):
output = self.engine.render_to_string('slugify02', {'a': 'a & b', 'b': mark_safe('a & b')})
self.assertEqual(output, 'a-b a-amp-b')
class FunctionTests(SimpleTestCase):
def test_slugify(self):
self.assertEqual(
slugify(' Jack & Jill like numbers 1,2,3 and 4 and silly characters ?%.$!/'),
'jack-jill-like-numbers-123-and-4-and-silly-characters',
)
def test_unicode(self):
self.assertEqual(
slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
'un-elephant-a-loree-du-bois',
)
def test_non_string_input(self):
self.assertEqual(slugify(123), '123')
| bsd-3-clause |
andrius-preimantas/odoo | addons/website_forum/models/forum.py | 6 | 32098 |
# -*- coding: utf-8 -*-
from datetime import datetime
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.addons.website.models.website import slug
from openerp.osv import osv, fields
from openerp.tools import html2plaintext
from openerp.tools.translate import _
class KarmaError(ValueError):
""" Karma-related error, used for forum and posts. """
pass
class Forum(osv.Model):
"""TDE TODO: set karma values for actions dynamic for a given forum"""
_name = 'forum.forum'
_description = 'Forums'
_inherit = ['mail.thread', 'website.seo.metadata']
_columns = {
'name': fields.char('Name', required=True, translate=True),
'faq': fields.html('Guidelines'),
'description': fields.html('Description'),
# karma generation
'karma_gen_question_new': fields.integer('Karma earned for new questions'),
'karma_gen_question_upvote': fields.integer('Karma earned for upvoting a question'),
'karma_gen_question_downvote': fields.integer('Karma earned for downvoting a question'),
'karma_gen_answer_upvote': fields.integer('Karma earned for upvoting an answer'),
'karma_gen_answer_downvote': fields.integer('Karma earned for downvoting an answer'),
'karma_gen_answer_accept': fields.integer('Karma earned for accepting an answer'),
'karma_gen_answer_accepted': fields.integer('Karma earned for having an answer accepted'),
'karma_gen_answer_flagged': fields.integer('Karma earned for having an answer flagged'),
# karma-based actions
'karma_ask': fields.integer('Karma to ask a new question'),
'karma_answer': fields.integer('Karma to answer a question'),
'karma_edit_own': fields.integer('Karma to edit its own posts'),
'karma_edit_all': fields.integer('Karma to edit all posts'),
'karma_close_own': fields.integer('Karma to close its own posts'),
'karma_close_all': fields.integer('Karma to close all posts'),
'karma_unlink_own': fields.integer('Karma to delete its own posts'),
'karma_unlink_all': fields.integer('Karma to delete all posts'),
'karma_upvote': fields.integer('Karma to upvote'),
'karma_downvote': fields.integer('Karma to downvote'),
'karma_answer_accept_own': fields.integer('Karma to accept an answer on its own questions'),
'karma_answer_accept_all': fields.integer('Karma to accept an answer to all questions'),
'karma_editor_link_files': fields.integer('Karma for linking files (Editor)'),
'karma_editor_clickable_link': fields.integer('Karma for clickable links (Editor)'),
'karma_comment_own': fields.integer('Karma to comment its own posts'),
'karma_comment_all': fields.integer('Karma to comment all posts'),
'karma_comment_convert_own': fields.integer('Karma to convert its own answers to comments and vice versa'),
'karma_comment_convert_all': fields.integer('Karma to convert all answers to comments and vice versa'),
'karma_comment_unlink_own': fields.integer('Karma to unlink its own comments'),
'karma_comment_unlink_all': fields.integer('Karma to unlink all comments'),
'karma_retag': fields.integer('Karma to change question tags'),
'karma_flag': fields.integer('Karma to flag a post as offensive'),
}
def _get_default_faq(self, cr, uid, context=None):
fname = openerp.modules.get_module_resource('website_forum', 'data', 'forum_default_faq.html')
with open(fname, 'r') as f:
return f.read()
_defaults = {
'description': 'This community is for professionals and enthusiasts of our products and services.',
'faq': _get_default_faq,
'karma_gen_question_new': 2,
'karma_gen_question_upvote': 5,
'karma_gen_question_downvote': -2,
'karma_gen_answer_upvote': 10,
'karma_gen_answer_downvote': -2,
'karma_gen_answer_accept': 2,
'karma_gen_answer_accepted': 15,
'karma_gen_answer_flagged': -100,
'karma_ask': 0,
'karma_answer': 0,
'karma_edit_own': 1,
'karma_edit_all': 300,
'karma_close_own': 100,
'karma_close_all': 500,
'karma_unlink_own': 500,
'karma_unlink_all': 1000,
'karma_upvote': 5,
'karma_downvote': 50,
'karma_answer_accept_own': 20,
'karma_answer_accept_all': 500,
'karma_editor_link_files': 20,
'karma_editor_clickable_link': 20,
'karma_comment_own': 1,
'karma_comment_all': 1,
'karma_comment_convert_own': 50,
'karma_comment_convert_all': 500,
'karma_comment_unlink_own': 50,
'karma_comment_unlink_all': 500,
'karma_retag': 75,
'karma_flag': 500,
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
return super(Forum, self).create(cr, uid, values, context=create_context)
class Post(osv.Model):
_name = 'forum.post'
_description = 'Forum Post'
_inherit = ['mail.thread', 'website.seo.metadata']
_order = "is_correct DESC, vote_count DESC"
def _get_user_vote(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
vote_ids = self.pool['forum.post.vote'].search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], context=context)
for vote in self.pool['forum.post.vote'].browse(cr, uid, vote_ids, context=context):
res[vote.post_id.id] = vote.vote
return res
def _get_vote_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
for vote in post.vote_ids:
res[post.id] += int(vote.vote)
return res
def _get_post_from_vote(self, cr, uid, ids, context=None):
result = {}
for vote in self.pool['forum.post.vote'].browse(cr, uid, ids, context=context):
result[vote.post_id.id] = True
return result.keys()
def _get_user_favourite(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
if uid in [f.id for f in post.favourite_ids]:
res[post.id] = True
return res
def _get_favorite_count(self, cr, uid, ids, field_name, arg, context):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] += len(post.favourite_ids)
return res
def _get_post_from_hierarchy(self, cr, uid, ids, context=None):
post_ids = set(ids)
for post in self.browse(cr, SUPERUSER_ID, ids, context=context):
if post.parent_id:
post_ids.add(post.parent_id.id)
return list(post_ids)
def _get_child_count(self, cr, uid, ids, field_name=False, arg={}, context=None):
res = dict.fromkeys(ids, 0)
for post in self.browse(cr, uid, ids, context=context):
if post.parent_id:
res[post.parent_id.id] = len(post.parent_id.child_ids)
else:
res[post.id] = len(post.child_ids)
return res
def _get_uid_answered(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = any(answer.create_uid.id == uid for answer in post.child_ids)
return res
def _get_has_validated_answer(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
ans_ids = self.search(cr, uid, [('parent_id', 'in', ids), ('is_correct', '=', True)], context=context)
for answer in self.browse(cr, uid, ans_ids, context=context):
res[answer.parent_id.id] = True
return res
def _is_self_reply(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = post.parent_id and post.parent_id.create_uid == post.create_uid or False
return res
def _get_post_karma_rights(self, cr, uid, ids, field_name, arg, context=None):
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
res = dict.fromkeys(ids, False)
for post in self.browse(cr, uid, ids, context=context):
res[post.id] = {
'karma_ask': post.forum_id.karma_ask,
'karma_answer': post.forum_id.karma_answer,
'karma_accept': post.parent_id and post.parent_id.create_uid.id == uid and post.forum_id.karma_answer_accept_own or post.forum_id.karma_answer_accept_all,
'karma_edit': post.create_uid.id == uid and post.forum_id.karma_edit_own or post.forum_id.karma_edit_all,
'karma_close': post.create_uid.id == uid and post.forum_id.karma_close_own or post.forum_id.karma_close_all,
'karma_unlink': post.create_uid.id == uid and post.forum_id.karma_unlink_own or post.forum_id.karma_unlink_all,
'karma_upvote': post.forum_id.karma_upvote,
'karma_downvote': post.forum_id.karma_downvote,
'karma_comment': post.create_uid.id == uid and post.forum_id.karma_comment_own or post.forum_id.karma_comment_all,
'karma_comment_convert': post.create_uid.id == uid and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all,
}
res[post.id].update({
'can_ask': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_ask'],
'can_answer': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_answer'],
'can_accept': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_accept'],
'can_edit': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_edit'],
'can_close': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_close'],
'can_unlink': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_unlink'],
'can_upvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_upvote'],
'can_downvote': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_downvote'],
'can_comment': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment'],
'can_comment_convert': uid == SUPERUSER_ID or user.karma >= res[post.id]['karma_comment_convert'],
})
return res
_columns = {
'name': fields.char('Title'),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'content': fields.html('Content'),
'tag_ids': fields.many2many('forum.tag', 'forum_tag_rel', 'forum_id', 'forum_tag_id', 'Tags'),
'state': fields.selection([('active', 'Active'), ('close', 'Close'), ('offensive', 'Offensive')], 'Status'),
'views': fields.integer('Number of Views'),
'active': fields.boolean('Active'),
'is_correct': fields.boolean('Valid Answer', help='Correct answer or answer accepted on this question.'),
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', 'in', ['email', 'comment'])
],
string='Post Messages', help="Comments on forum post",
),
# history
'create_date': fields.datetime('Asked on', select=True, readonly=True),
'create_uid': fields.many2one('res.users', 'Created by', select=True, readonly=True),
'write_date': fields.datetime('Updated on', select=True, readonly=True),
'write_uid': fields.many2one('res.users', 'Updated by', select=True, readonly=True),
# vote fields
'vote_ids': fields.one2many('forum.post.vote', 'post_id', 'Votes'),
'user_vote': fields.function(_get_user_vote, string='My Vote', type='integer'),
'vote_count': fields.function(
_get_vote_count, string="Votes", type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['vote_ids'], 10),
'forum.post.vote': (_get_post_from_vote, [], 10),
}),
# favorite fields
'favourite_ids': fields.many2many('res.users', string='Favourite'),
'user_favourite': fields.function(_get_user_favourite, string="My Favourite", type='boolean'),
'favourite_count': fields.function(
_get_favorite_count, string='Favorite Count', type='integer',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['favourite_ids'], 10),
}),
# hierarchy
'parent_id': fields.many2one('forum.post', 'Question', ondelete='cascade'),
'self_reply': fields.function(
_is_self_reply, 'Reply to own question', type='boolean',
store={
'forum.post': (lambda self, cr, uid, ids, c={}: ids, ['parent_id', 'create_uid'], 10),
}),
'child_ids': fields.one2many('forum.post', 'parent_id', 'Answers'),
'child_count': fields.function(
_get_child_count, string="Answers", type='integer',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids'], 10),
}),
'uid_has_answered': fields.function(
_get_uid_answered, string='Has Answered', type='boolean',
),
'has_validated_answer': fields.function(
_get_has_validated_answer, string='Has a Validated Answer', type='boolean',
store={
'forum.post': (_get_post_from_hierarchy, ['parent_id', 'child_ids', 'is_correct'], 10),
}
),
# closing
'closed_reason_id': fields.many2one('forum.post.reason', 'Reason'),
'closed_uid': fields.many2one('res.users', 'Closed by', select=1),
'closed_date': fields.datetime('Closed on', readonly=True),
# karma
'karma_ask': fields.function(_get_post_karma_rights, string='Karma to ask', type='integer', multi='_get_post_karma_rights'),
'karma_answer': fields.function(_get_post_karma_rights, string='Karma to answer', type='integer', multi='_get_post_karma_rights'),
'karma_accept': fields.function(_get_post_karma_rights, string='Karma to accept this answer', type='integer', multi='_get_post_karma_rights'),
'karma_edit': fields.function(_get_post_karma_rights, string='Karma to edit', type='integer', multi='_get_post_karma_rights'),
'karma_close': fields.function(_get_post_karma_rights, string='Karma to close', type='integer', multi='_get_post_karma_rights'),
'karma_unlink': fields.function(_get_post_karma_rights, string='Karma to unlink', type='integer', multi='_get_post_karma_rights'),
'karma_upvote': fields.function(_get_post_karma_rights, string='Karma to upvote', type='integer', multi='_get_post_karma_rights'),
'karma_downvote': fields.function(_get_post_karma_rights, string='Karma to downvote', type='integer', multi='_get_post_karma_rights'),
'karma_comment': fields.function(_get_post_karma_rights, string='Karma to comment', type='integer', multi='_get_post_karma_rights'),
'karma_comment_convert': fields.function(_get_post_karma_rights, string='Karma to convert to a comment', type='integer', multi='_get_post_karma_rights'),
# access rights
'can_ask': fields.function(_get_post_karma_rights, string='Can Ask', type='boolean', multi='_get_post_karma_rights'),
'can_answer': fields.function(_get_post_karma_rights, string='Can Answer', type='boolean', multi='_get_post_karma_rights'),
'can_accept': fields.function(_get_post_karma_rights, string='Can Accept', type='boolean', multi='_get_post_karma_rights'),
'can_edit': fields.function(_get_post_karma_rights, string='Can Edit', type='boolean', multi='_get_post_karma_rights'),
'can_close': fields.function(_get_post_karma_rights, string='Can Close', type='boolean', multi='_get_post_karma_rights'),
'can_unlink': fields.function(_get_post_karma_rights, string='Can Unlink', type='boolean', multi='_get_post_karma_rights'),
'can_upvote': fields.function(_get_post_karma_rights, string='Can Upvote', type='boolean', multi='_get_post_karma_rights'),
'can_downvote': fields.function(_get_post_karma_rights, string='Can Downvote', type='boolean', multi='_get_post_karma_rights'),
'can_comment': fields.function(_get_post_karma_rights, string='Can Comment', type='boolean', multi='_get_post_karma_rights'),
'can_comment_convert': fields.function(_get_post_karma_rights, string='Can Convert to Comment', type='boolean', multi='_get_post_karma_rights'),
}
_defaults = {
'state': 'active',
'views': 0,
'active': True,
'vote_ids': list(),
'favourite_ids': list(),
'child_ids': list(),
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
create_context = dict(context, mail_create_nolog=True)
post_id = super(Post, self).create(cr, uid, vals, context=create_context)
post = self.browse(cr, SUPERUSER_ID, post_id, context=context) # SUPERUSER_ID to avoid read access rights issues when creating
# karma-based access
if post.parent_id and not post.can_answer:
raise KarmaError('Not enough karma to answer a question')
elif not post.parent_id and not post.can_ask:
raise KarmaError('Not enough karma to create a new question')
# messaging and chatter
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
if post.parent_id:
body = _(
'<p>A new answer for <i>%s</i> has been posted. <a href="%s/forum/%s/question/%s">Click here to access the post.</a></p>' %
(post.parent_id.name, base_url, slug(post.parent_id.forum_id), slug(post.parent_id))
)
self.message_post(cr, uid, post.parent_id.id, subject=_('Re: %s') % post.parent_id.name, body=body, subtype='website_forum.mt_answer_new', context=context)
else:
body = _(
'<p>A new question <i>%s</i> has been asked on %s. <a href="%s/forum/%s/question/%s">Click here to access the question.</a></p>' %
(post.name, post.forum_id.name, base_url, slug(post.forum_id), slug(post))
)
self.message_post(cr, uid, post_id, subject=post.name, body=body, subtype='website_forum.mt_question_new', context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_question_new, context=context)
return post_id
def write(self, cr, uid, ids, vals, context=None):
posts = self.browse(cr, uid, ids, context=context)
if 'state' in vals:
if vals['state'] in ['active', 'close'] and any(not post.can_close for post in posts):
raise KarmaError('Not enough karma to close or reopen a post.')
if 'active' in vals:
if any(not post.can_unlink for post in posts):
raise KarmaError('Not enough karma to delete or reactivate a post')
if 'is_correct' in vals:
if any(not post.can_accept for post in posts):
raise KarmaError('Not enough karma to accept or refuse an answer')
# update karma except for self-acceptance
mult = 1 if vals['is_correct'] else -1
for post in self.browse(cr, uid, ids, context=context):
if vals['is_correct'] != post.is_correct and post.create_uid.id != uid:
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * mult, context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * mult, context=context)
if any(key not in ['state', 'active', 'is_correct', 'closed_uid', 'closed_date', 'closed_reason_id'] for key in vals.keys()) and any(not post.can_edit for post in posts):
raise KarmaError('Not enough karma to edit a post.')
res = super(Post, self).write(cr, uid, ids, vals, context=context)
# if post content modified, notify followers
if 'content' in vals or 'name' in vals:
for post in posts:
if post.parent_id:
body, subtype = _('Answer Edited'), 'website_forum.mt_answer_edit'
obj_id = post.parent_id.id
else:
body, subtype = _('Question Edited'), 'website_forum.mt_question_edit'
obj_id = post.id
self.message_post(cr, uid, obj_id, body=body, subtype=subtype, context=context)
return res
def close(self, cr, uid, ids, reason_id, context=None):
if any(post.parent_id for post in self.browse(cr, uid, ids, context=context)):
return False
return self.pool['forum.post'].write(cr, uid, ids, {
'state': 'close',
'closed_uid': uid,
'closed_date': datetime.today().strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT),
'closed_reason_id': reason_id,
}, context=context)
def unlink(self, cr, uid, ids, context=None):
posts = self.browse(cr, uid, ids, context=context)
if any(not post.can_unlink for post in posts):
raise KarmaError('Not enough karma to unlink a post')
# if unlinking an answer with accepted answer: remove provided karma
for post in posts:
if post.is_correct:
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [post.create_uid.id], post.forum_id.karma_gen_answer_accepted * -1, context=context)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [uid], post.forum_id.karma_gen_answer_accept * -1, context=context)
return super(Post, self).unlink(cr, uid, ids, context=context)
def vote(self, cr, uid, ids, upvote=True, context=None):
posts = self.browse(cr, uid, ids, context=context)
if upvote and any(not post.can_upvote for post in posts):
raise KarmaError('Not enough karma to upvote.')
elif not upvote and any(not post.can_downvote for post in posts):
raise KarmaError('Not enough karma to downvote.')
Vote = self.pool['forum.post.vote']
vote_ids = Vote.search(cr, uid, [('post_id', 'in', ids), ('user_id', '=', uid)], limit=1, context=context)
if vote_ids:
for vote in Vote.browse(cr, uid, vote_ids, context=context):
if upvote:
new_vote = '0' if vote.vote == '-1' else '1'
else:
new_vote = '0' if vote.vote == '1' else '-1'
Vote.write(cr, uid, vote_ids, {'vote': new_vote}, context=context)
else:
for post_id in ids:
new_vote = '1' if upvote else '-1'
Vote.create(cr, uid, {'post_id': post_id, 'vote': new_vote}, context=context)
return {'vote_count': self._get_vote_count(cr, uid, ids, None, None, context=context)[ids[0]]}
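# Illustrative note (not in the original file): votes toggle through the
# neutral state: with an existing vote, an upvote maps '-1' -> '0' and
# anything else -> '1', while a downvote maps '1' -> '0' and anything
# else -> '-1'.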
def convert_answer_to_comment(self, cr, uid, id, context=None):
""" Tools to convert an answer (forum.post) to a comment (mail.message).
The original post is unlinked and a new comment is posted on the question
using the post create_uid as the comment's author. """
post = self.browse(cr, uid, id, context=context)
if not post.parent_id:
return False
# karma-based action check: use the post field that computed own/all value
if not post.can_comment_convert:
raise KarmaError('Not enough karma to convert an answer to a comment')
# post the message
question = post.parent_id
values = {
'author_id': post.create_uid.partner_id.id,
'body': html2plaintext(post.content),
'type': 'comment',
'subtype': 'mail.mt_comment',
'date': post.create_date,
}
message_id = self.pool['forum.post'].message_post(
cr, uid, question.id,
context=dict(context, mail_create_nosubscribe=True),
**values)
# unlink the original answer, using SUPERUSER_ID to avoid karma issues
self.pool['forum.post'].unlink(cr, SUPERUSER_ID, [post.id], context=context)
return message_id
def convert_comment_to_answer(self, cr, uid, message_id, default=None, context=None):
""" Tool to convert a comment (mail.message) into an answer (forum.post).
The original comment is unlinked and a new answer from the comment's author
is created. Nothing is done if the comment's author already answered the
question. """
comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
post = self.pool['forum.post'].browse(cr, uid, comment.res_id, context=context)
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if not comment.author_id or not comment.author_id.user_ids:  # only comments posted by users can be converted
return False
# karma-based action check: must check the message's author to know if own / all
karma_convert = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_convert_own or post.forum_id.karma_comment_convert_all
can_convert = uid == SUPERUSER_ID or user.karma >= karma_convert
if not can_convert:
raise KarmaError('Not enough karma to convert a comment to an answer')
# check that the message's author does not already have an answer
question = post.parent_id if post.parent_id else post
post_create_uid = comment.author_id.user_ids[0]
if any(answer.create_uid.id == post_create_uid.id for answer in question.child_ids):
return False
# create the new post
post_values = {
'forum_id': question.forum_id.id,
'content': comment.body,
'parent_id': question.id,
}
# done with the author user to have create_uid correctly set
new_post_id = self.pool['forum.post'].create(cr, post_create_uid.id, post_values, context=context)
# delete comment
self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [comment.id], context=context)
return new_post_id
def unlink_comment(self, cr, uid, id, message_id, context=None):
comment = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)
post = self.pool['forum.post'].browse(cr, uid, id, context=context)
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if not comment.model == 'forum.post' or not comment.res_id == id:
return False
# karma-based action check: must check the message's author to know if own or all
karma_unlink = comment.author_id.id == user.partner_id.id and post.forum_id.karma_comment_unlink_own or post.forum_id.karma_comment_unlink_all
can_unlink = uid == SUPERUSER_ID or user.karma >= karma_unlink
if not can_unlink:
raise KarmaError('Not enough karma to unlink a comment')
return self.pool['mail.message'].unlink(cr, SUPERUSER_ID, [message_id], context=context)
def set_viewed(self, cr, uid, ids, context=None):
cr.execute("""UPDATE forum_post SET views = views+1 WHERE id IN %s""", (tuple(ids),))
return True
def _get_access_link(self, cr, uid, mail, partner, context=None):
post = self.pool['forum.post'].browse(cr, uid, mail.res_id, context=context)
res_id = post.parent_id and "%s#answer-%s" % (post.parent_id.id, post.id) or post.id
return "/forum/%s/question/%s" % (post.forum_id.id, res_id)
class PostReason(osv.Model):
_name = "forum.post.reason"
_description = "Post Closing Reason"
_order = 'name'
_columns = {
'name': fields.char('Post Reason', required=True, translate=True),
}
class Vote(osv.Model):
_name = 'forum.post.vote'
_description = 'Vote'
_columns = {
'post_id': fields.many2one('forum.post', 'Post', ondelete='cascade', required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'vote': fields.selection([('1', '1'), ('-1', '-1'), ('0', '0')], 'Vote', required=True),
'create_date': fields.datetime('Create Date', select=True, readonly=True),
}
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'vote': lambda *args: '1',
}
def _get_karma_value(self, old_vote, new_vote, up_karma, down_karma):
_karma_upd = {
'-1': {'-1': 0, '0': -1 * down_karma, '1': -1 * down_karma + up_karma},
'0': {'-1': 1 * down_karma, '0': 0, '1': up_karma},
'1': {'-1': -1 * up_karma + down_karma, '0': -1 * up_karma, '1': 0}
}
return _karma_upd[old_vote][new_vote]
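# Worked example (not in the original file): switching a vote from '1' to
# '-1' must undo the earlier upvote and apply the downvote, hence
# _karma_upd['1']['-1'] == -1 * up_karma + down_karma; a no-op transition
# such as '1' -> '1' yields 0.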
def create(self, cr, uid, vals, context=None):
vote_id = super(Vote, self).create(cr, uid, vals, context=context)
vote = self.browse(cr, uid, vote_id, context=context)
if vote.post_id.parent_id:
karma_value = self._get_karma_value('0', vote.vote, vote.post_id.forum_id.karma_gen_answer_upvote, vote.post_id.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value('0', vote.vote, vote.post_id.forum_id.karma_gen_question_upvote, vote.post_id.forum_id.karma_gen_question_downvote)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.post_id.create_uid.id], karma_value, context=context)
return vote_id
def write(self, cr, uid, ids, values, context=None):
if 'vote' in values:
for vote in self.browse(cr, uid, ids, context=context):
if vote.post_id.parent_id:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.post_id.forum_id.karma_gen_answer_upvote, vote.post_id.forum_id.karma_gen_answer_downvote)
else:
karma_value = self._get_karma_value(vote.vote, values['vote'], vote.post_id.forum_id.karma_gen_question_upvote, vote.post_id.forum_id.karma_gen_question_downvote)
self.pool['res.users'].add_karma(cr, SUPERUSER_ID, [vote.post_id.create_uid.id], karma_value, context=context)
res = super(Vote, self).write(cr, uid, ids, values, context=context)
return res
class Tags(osv.Model):
_name = "forum.tag"
_description = "Tag"
_inherit = ['website.seo.metadata']
def _get_posts_count(self, cr, uid, ids, field_name, arg, context=None):
return dict((tag_id, self.pool['forum.post'].search_count(cr, uid, [('tag_ids', 'in', tag_id)], context=context)) for tag_id in ids)
def _get_tag_from_post(self, cr, uid, ids, context=None):
return list(set(
[tag.id for post in self.pool['forum.post'].browse(cr, SUPERUSER_ID, ids, context=context) for tag in post.tag_ids]
))
_columns = {
'name': fields.char('Name', required=True),
'forum_id': fields.many2one('forum.forum', 'Forum', required=True),
'post_ids': fields.many2many('forum.post', 'forum_tag_rel', 'tag_id', 'post_id', 'Posts'),
'posts_count': fields.function(
_get_posts_count, type='integer', string="Number of Posts",
store={
'forum.post': (_get_tag_from_post, ['tag_ids'], 10),
}
),
'create_uid': fields.many2one('res.users', 'Created by', readonly=True),
}
| agpl-3.0 |
MichSchli/QuestionAnsweringGCN | models/tensorflow_components/graph/assignment_view.py | 1 | 1394 |
import tensorflow as tf
class AssignmentView:
def __init__(self):
self.variables = {}
self.variables["total_vertex_count"] = tf.placeholder(tf.int32)
self.variables["vertex_indices"] = tf.placeholder(tf.int32)
self.variables["from_range"] = tf.placeholder(tf.int32)
def get_all_vectors(self, view_embeddings):
from_size = tf.shape(view_embeddings)[0]
from_indices = self.variables["from_range"]
to_size = self.variables["total_vertex_count"]
to_indices = self.variables["vertex_indices"]
values = tf.ones_like(from_indices, dtype=tf.float32)
stacked_indices = tf.transpose(tf.stack([to_indices, from_indices]))
indices = tf.to_int64(stacked_indices)
shape = tf.to_int64([to_size, from_size])
matrix = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
return tf.sparse_tensor_dense_matmul(matrix, view_embeddings)
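# Illustrative note (not in the original file): the SparseTensor above is a
# 0/1 assignment matrix of shape [total_vertex_count, from_size] with a one
# at (to_indices[i], from_indices[i]); the matmul scatters each row of
# view_embeddings to its target vertex and leaves unassigned vertices zero.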
def assign(self, target_indices, total_vertex_count, from_range):
self.variable_assignments = {}
self.variable_assignments["total_vertex_count"] = total_vertex_count
self.variable_assignments["vertex_indices"] = target_indices
self.variable_assignments["from_range"] = from_range
def get_regularization(self):
return 0
def handle_variable_assignment(self, batch, mode):
pass
| mit |
Habatchii/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/cp1254.py | 93 | 14065 |
""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1254',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
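# Illustrative round-trip sketch (not part of the generated codec; shown
# for Python 2, matching the u'' literals above):
#
# >>> codecs.charmap_decode(b'\xfd', 'strict', decoding_table)
# (u'\u0131', 1)  # 0xFD -> LATIN SMALL LETTER DOTLESS I
# >>> codecs.charmap_encode(u'\u0131', 'strict', encoding_table)
# ('\xfd', 1)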
|
apache-2.0
|
oihane/odoomrp-wip
|
mrp_operations_start_without_material/__openerp__.py
|
6
|
1441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
'name': 'MRP Operations start without material',
'version': '8.0.1.0.1',
'author': 'OdooMRP team',
'contributors': ["Daniel Campos <[email protected]>",
"Pedro M. Baeza <[email protected]>",
"Ana Juaristi <[email protected]>"],
'website': 'http://www.odoomrp.com',
"depends": ['mrp_operations_extension'],
"category": "Manufacturing",
"data": ['views/mrp_routing_view.xml',
'views/mrp_production_view.xml'
],
"installable": True,
"application": True
}
|
agpl-3.0
|
ronojoy/BDA_py_demos
|
demos_ch5/demo5_2.py
|
19
|
3326
|
"""Bayesian Data Analysis, 3rd ed
Chapter 5, demo 2
Hierarchical model for SAT-example data (BDA3, p. 102)
"""
from __future__ import division
import numpy as np
from scipy.stats import norm
import scipy.io # For importing a matlab file
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# SAT-example data (BDA3 p. 120)
# y is the estimated treatment effect
# s is the standard error of effect estimate
y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
s = np.array([15, 10, 16, 11, 9, 11, 10, 18])
M = len(y)
# load the pre-computed results for the hierarchical model
# replace this with your own code in Ex 5.1*
hres_path = '../utilities_and_data/demo5_2.mat'
hres = scipy.io.loadmat(hres_path)
''' Content information of the precalculated results:
>>> scipy.io.whosmat('demo5_2.mat')
[('pxm', (8, 500), 'double'),
('t', (1, 1000), 'double'),
('tp', (1, 1000), 'double'),
('tsd', (8, 1000), 'double'),
('tm', (8, 1000), 'double')]
'''
pxm = hres['pxm']
t = hres['t'][0]
tp = hres['tp'][0]
tsd = hres['tsd']
tm = hres['tm']
# plot the separate, pooled and hierarchical models
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
x = np.linspace(-40, 60, 500)
# separate
lines = axes[0].plot(x, norm.pdf(x[:,None], y[1:], s[1:]), linewidth=1)
line, = axes[0].plot(x, norm.pdf(x, y[0], s[0]), 'r')
axes[0].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[0].set_yticks(())
axes[0].set_title('separate model')
# pooled
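# (With y_j ~ N(theta, s_j^2) and a flat prior on theta, the pooled
# posterior is normal with mean sum(y/s^2) / sum(1/s^2) and standard
# deviation sqrt(1 / sum(1/s^2)) -- the precision-weighted average
# computed inline below.)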
axes[1].plot(
x,
norm.pdf(
x,
np.sum(y/s**2)/np.sum(1/s**2),
np.sqrt(1/np.sum(1/s**2))
),
label='All schools'
)
axes[1].legend(loc='upper left')
axes[1].set_yticks(())
axes[1].set_title('pooled model')
# hierarchical
lines = axes[2].plot(x, pxm[1:].T, linewidth=1)
line, = axes[2].plot(x, pxm[0], 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_yticks(())
axes[2].set_title('hierarchical model')
axes[2].set_xlabel('Treatment effect')
# plot various marginal and conditional posterior summaries
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
axes[0].plot(t, tp)
axes[0].set_yticks(())
axes[0].set_title(r'marginal posterior density $p(\tau|y)$')
axes[0].set_ylabel(r'$p(\tau|y)$', fontsize=20)
axes[0].set_xlim([0,35])
lines = axes[1].plot(t, tm[1:].T, linewidth=1)
line, = axes[1].plot(t, tm[0].T, 'r')
axes[1].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[1].set_title(r'conditional posterior means of effects '
r'$\operatorname{E}(\theta_j|\tau,y)$')
axes[1].set_ylabel(r'$\operatorname{E}(\theta_j|\tau,y)$', fontsize=20)
lines = axes[2].plot(t, tsd[1:].T, linewidth=1)
line, = axes[2].plot(t, tsd[0].T, 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_title(r'standard deviations of effects '
r'$\operatorname{sd}(\theta_j|\tau,y)$')
axes[2].set_ylabel(r'$\operatorname{sd}(\theta_j|\tau,y)$', fontsize=20)
axes[2].set_xlabel(r'$\tau$', fontsize=20)
plt.show()
|
gpl-3.0
|
Tangxuguo/Django_SNS
|
osf/restapi/serializers.py
|
1
|
1410
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from accounts.models import *
from album.models import *
from comment.models import *
from notification.models import *
from post.models import *
from tag.models import *
# account
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url', 'name')
# album
class AlbumSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Album
fields = ('id', 'user_id')
class PhotoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Photo
fields = ('id', 'key', 'album_id', 'ts', 'desc')
# comment
class CommentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Comment
fields = ('id', 'comment_object_type', 'comment_object_id', 'comment_author')
# post
class PostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Post
fields = ('id', 'post_author', 'post_ts', 'post_content', 'post_title',)
# tag
class TagSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Tag
fields = ('id', 'tag', 'add_ts', 'cover',)
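# Illustrative usage sketch (hypothetical view code, not part of this
# module): hyperlinked serializers need the request in their context so
# the 'url' fields can be rendered as absolute URIs.
#
# serializer = UserSerializer(user, context={'request': request})
# serializer.data  # e.g. {'url': '...', 'username': '...', 'email': '...', 'groups': [...]}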
|
gpl-3.0
|
myang321/django
|
tests/generic_views/test_detail.py
|
284
|
8387
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin
from .models import Artist, Author, Book, Page
@override_settings(ROOT_URLCONF='generic_views.urls')
class DetailViewTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name='Rene Magritte')
cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano')
cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg')
cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1))
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1)
)
cls.page1 = Page.objects.create(
content='I was once bitten by a moose.', template='generic_views/page_template.html'
)
def test_simple_object(self):
res = self.client.get('/detail/obj/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], {'foo': 'bar'})
self.assertIsInstance(res.context['view'], View)
self.assertTemplateUsed(res, 'generic_views/detail.html')
def test_detail_by_pk(self):
res = self.client.get('/detail/author/%s/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_missing_object(self):
res = self.client.get('/detail/author/500/')
self.assertEqual(res.status_code, 404)
def test_detail_object_does_not_exist(self):
self.assertRaises(ObjectDoesNotExist, self.client.get, '/detail/doesnotexist/1/')
def test_detail_by_custom_pk(self):
res = self.client.get('/detail/author/bycustompk/%s/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_slug(self):
res = self.client.get('/detail/author/byslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_custom_slug(self):
res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg'))
self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg'))
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_ignore_slug(self):
res = self.client.get('/detail/author/bypkignoreslug/%s-roberto-bolano/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_ignore_slug_mismatch(self):
res = self.client.get('/detail/author/bypkignoreslug/%s-scott-rosenberg/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_and_slug(self):
res = self.client.get('/detail/author/bypkandslug/%s-roberto-bolano/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_detail_by_pk_and_slug_mismatch_404(self):
res = self.client.get('/detail/author/bypkandslug/%s-scott-rosenberg/' % self.author1.pk)
self.assertEqual(res.status_code, 404)
def test_verbose_name(self):
res = self.client.get('/detail/artist/%s/' % self.artist1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.artist1)
self.assertEqual(res.context['artist'], self.artist1)
self.assertTemplateUsed(res, 'generic_views/artist_detail.html')
def test_template_name(self):
res = self.client.get('/detail/author/%s/template_name/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/about.html')
def test_template_name_suffix(self):
res = self.client.get('/detail/author/%s/template_name_suffix/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['author'], self.author1)
self.assertTemplateUsed(res, 'generic_views/author_view.html')
def test_template_name_field(self):
res = self.client.get('/detail/page/%s/field/' % self.page1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.page1)
self.assertEqual(res.context['page'], self.page1)
self.assertTemplateUsed(res, 'generic_views/page_template.html')
def test_context_object_name(self):
res = self.client.get('/detail/author/%s/context_object_name/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertEqual(res.context['thingy'], self.author1)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_duplicated_context_object_name(self):
res = self.client.get('/detail/author/%s/dupe_context_object_name/' % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], self.author1)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_detail.html')
def test_deferred_queryset_template_name(self):
class FormContext(SingleObjectTemplateResponseMixin):
request = RequestFactory().get('/')
model = Author
object = Author.objects.defer('name').get(pk=self.author1.pk)
self.assertEqual(FormContext().get_template_names()[0], 'generic_views/author_detail.html')
def test_deferred_queryset_context_object_name(self):
class FormContext(ModelFormMixin):
request = RequestFactory().get('/')
model = Author
object = Author.objects.defer('name').get(pk=self.author1.pk)
fields = ('name',)
form_context_data = FormContext().get_context_data()
self.assertEqual(form_context_data['object'], self.author1)
self.assertEqual(form_context_data['author'], self.author1)
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, '/detail/author/invalid/url/')
def test_invalid_queryset(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/')
def test_non_model_object_with_meta(self):
res = self.client.get('/detail/nonmodel/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'].id, "non_model_1")
|
bsd-3-clause
|
lokeshh/stem
|
test/unit/descriptor/export.py
|
8
|
3212
|
"""
Unit tests for stem.descriptor.export.
"""
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import stem.prereq
import test.runner
from stem.descriptor.export import export_csv, export_csv_file
from test.mocking import (
get_relay_server_descriptor,
get_bridge_server_descriptor,
)
class TestExport(unittest.TestCase):
def test_minimal_descriptor(self):
"""
Exports a single minimal tor server descriptor.
"""
# we won't have a header prior to python 2.7
if not stem.prereq.is_python_27():
test.runner.skip(self, '(header added in python 2.7)')
return
desc = get_relay_server_descriptor()
desc_csv = export_csv(desc, included_fields = ('nickname', 'address', 'published'), header = False)
expected = 'caerSidi,71.35.133.197,2012-03-01 17:15:27\n'
self.assertEqual(expected, desc_csv)
desc_csv = export_csv(desc, included_fields = ('nickname', 'address', 'published'), header = True)
expected = 'nickname,address,published\n' + expected
self.assertEqual(expected, desc_csv)
def test_multiple_descriptors(self):
"""
Exports multiple descriptors, making sure that we get them back in the same
order.
"""
nicknames = ('relay1', 'relay3', 'relay2', 'caerSidi', 'zeus')
descriptors = []
for nickname in nicknames:
router_line = '%s 71.35.133.197 9001 0 0' % nickname
descriptors.append(get_relay_server_descriptor({'router': router_line}))
expected = '\n'.join(nicknames) + '\n'
self.assertEqual(expected, export_csv(descriptors, included_fields = ('nickname',), header = False))
def test_file_output(self):
"""
Basic test for the export_csv_file() function, checking that it provides
the same output as export_csv().
"""
desc = get_relay_server_descriptor()
desc_csv = export_csv(desc)
csv_buffer = StringIO()
export_csv_file(csv_buffer, desc)
self.assertEqual(desc_csv, csv_buffer.getvalue())
def test_excludes_private_attr(self):
"""
Checks that the default attributes for our csv output don't include private fields.
"""
# we won't have a header prior to python 2.7
if not stem.prereq.is_python_27():
test.runner.skip(self, '(header added in python 2.7)')
return
desc = get_relay_server_descriptor()
desc_csv = export_csv(desc)
self.assertTrue(',signature' in desc_csv)
self.assertFalse(',_digest' in desc_csv)
self.assertFalse(',_annotation_lines' in desc_csv)
def test_empty_input(self):
"""
Exercises when we don't provide any descriptors.
"""
self.assertEqual('', export_csv([]))
def test_invalid_attributes(self):
"""
Attempts to make a csv with attributes that don't exist.
"""
desc = get_relay_server_descriptor()
self.assertRaises(ValueError, export_csv, desc, ('nickname', 'blarg!'))
def test_multiple_descriptor_types(self):
"""
Attempts to make a csv with multiple descriptor types.
"""
server_desc = get_relay_server_descriptor()
bridge_desc = get_bridge_server_descriptor()
self.assertRaises(ValueError, export_csv, (server_desc, bridge_desc))
|
lgpl-3.0
|
amitdeutsch/oppia
|
extensions/rules/real.py
|
7
|
1336
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for Reals."""
from extensions.rules import base
class Equals(base.RealRule):
description = 'is equal to {{x|Real}}'
class IsLessThan(base.RealRule):
description = 'is less than {{x|Real}}'
class IsGreaterThan(base.RealRule):
description = 'is greater than {{x|Real}}'
class IsLessThanOrEqualTo(base.RealRule):
description = 'is less than or equal to {{x|Real}}'
class IsGreaterThanOrEqualTo(base.RealRule):
description = 'is greater than or equal to {{x|Real}}'
class IsInclusivelyBetween(base.RealRule):
description = 'is between {{a|Real}} and {{b|Real}}, inclusive'
class IsWithinTolerance(base.RealRule):
description = 'is within {{tol|Real}} of {{x|Real}}'
|
apache-2.0
|
siddhika1889/Pydev-Dependencies
|
pysrc/pydev_ipython/version.py
|
142
|
1227
|
# encoding: utf-8
"""
Utilities for version comparison
It is a bit ridiculous that we need these.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from distutils.version import LooseVersion
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def check_version(v, check):
"""check version string v >= check
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to date.
"""
try:
return LooseVersion(v) >= LooseVersion(check)
except TypeError:
return True
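# Illustrative calls (made-up version strings; results as under Python 3):
# check_version('1.9.1', '1.8')      # -> True
# check_version('0.9', '1.0')        # -> False
# check_version('1.0.dev', '1.0.1')  # str/int comparison raises TypeError
#                                    # inside LooseVersion, so the check is
#                                    # assumed satisfied -> True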
|
epl-1.0
|
danakj/chromium
|
third_party/markupsafe/__init__.py
|
371
|
8205
|
# -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from markupsafe._compat import text_type, string_types, int_types, \
unichr, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
r"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
of frameworks and web applications use. :class:`Markup` is a direct
subclass of `unicode` and provides all the methods of `unicode`, except that
it escapes arguments passed to it and always returns `Markup`.
The `escape` function returns markup objects so that double escaping can't
happen.
The constructor of the :class:`Markup` class can be used for three
different things: When passed a unicode object it's assumed to be safe,
when passed an object with an HTML representation (has an `__html__`
method) that representation is used, otherwise the object passed is
converted into a unicode string and then assumed to be safe:
>>> Markup("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
>>> class Foo(object):
... def __html__(self):
... return '<a href="#">foo</a>'
...
>>> Markup(Foo())
Markup(u'<a href="#">foo</a>')
If you want an object passed in to always be treated as unsafe you can use the
:meth:`escape` classmethod to create a :class:`Markup` object:
>>> Markup.escape("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
Operations on a markup string are markup aware which means that all
arguments are passed through the :func:`escape` function:
>>> em = Markup("<em>%s</em>")
>>> em % "foo & bar"
Markup(u'<em>foo & bar</em>')
>>> strong = Markup("<strong>%(text)s</strong>")
>>> strong % {'text': '<blink>hacker here</blink>'}
Markup(u'<strong><blink>hacker here</blink></strong>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup(u'<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(cls, base=u'', encoding=None, errors='strict'):
if hasattr(base, '__html__'):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
return text_type.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if isinstance(other, string_types) or hasattr(other, '__html__'):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
text_type.__repr__(self)
)
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
r"""Unescape markup again into an text_type string. This also resolves
known HTML4 and XHTML entities:
>>> Markup("Main » <em>About</em>").unescape()
u'Main \xbb <em>About</em>'
"""
from markupsafe._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
r"""Unescape markup into an text_type string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
>>> Markup("Main » <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape the string. Works like :func:`escape` with the difference
that for subclasses of :class:`Markup` this function would return the
correct subclass.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_wrapper(name):
orig = getattr(text_type, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
#_escape_argspec(kwargs, kwargs.iteritems(), None)
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in '__getitem__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
'translate', 'expandtabs', 'swapcase', 'zfill':
locals()[method] = make_wrapper(method)
# new in python 2.5
if hasattr(text_type, 'partition'):
def partition(self, sep):
return tuple(map(self.__class__,
text_type.partition(self, self.escape(sep))))
def rpartition(self, sep):
return tuple(map(self.__class__,
text_type.rpartition(self, self.escape(sep))))
# new in python 2.6
if hasattr(text_type, 'format'):
format = make_wrapper('format')
# not in python 3
if hasattr(text_type, '__getslice__'):
__getslice__ = make_wrapper('__getslice__')
del method, make_wrapper
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
__repr__ = lambda s: str(s.escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules import the Markup type which is defined above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append('soft_str')
|
bsd-3-clause
|
topdk-jnz/php-code-to-inject-into-docker
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
6497
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
gpl-2.0
|
Dahlgren/HTPC-Manager
|
libs/concurrent/futures/thread.py
|
104
|
4605
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
from __future__ import with_statement
import atexit
import threading
import weakref
import sys
from concurrent.futures import _base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan ([email protected])'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e, tb = sys.exc_info()[1:]
self.future.set_exception_info(e, tb)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
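# Illustrative usage sketch (assumes this backport is importable as
# concurrent.futures; the Executor base class's context-manager support
# calls shutdown(wait=True) on exit):
#
# with ThreadPoolExecutor(max_workers=4) as executor:
#     future = executor.submit(pow, 2, 10)
#     print(future.result())  # 1024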
|
mit
|
twobraids/configman
|
configman/converters.py
|
2
|
18916
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, division, print_function
import sys
import re
import datetime
import types
import json
import six
from configman.datetime_util import (
datetime_from_ISO_string,
date_from_ISO_string,
datetime_to_ISO_string,
date_to_ISO_string,
)
# for backward compatibility these two methods get alternate names
datetime_converter = datetime_from_ISO_string
date_converter = date_from_ISO_string
from configman.config_exceptions import CannotConvertError
#------------------------------------------------------------------------------
# Utility section
#
# various handy functions used or associated with type conversions
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def str_dict_keys(a_dict):
"""return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['age']
99
>>> result[1]
2
The reason for this is that in Python <= 2.6.4 doing
``MyClass(**{u'name': u'Peter'})`` would raise a TypeError
Note that only unicode types are converted to str types.
The reason for that is you might have a class that looks like this::
class Option(object):
def __init__(self, foo=None, bar=None, **kwargs):
...
And it's being used like this::
Option(**{u'foo':1, u'bar':2, 3:4})
Then you don't want to change that {3:4} part which becomes part of
`**kwargs` inside the __init__ method.
Using integers as parameter keys is a silly example but the point is that
due to the python 2.6.4 bug only unicode keys are converted to str.
"""
new_dict = {}
for key in a_dict:
if six.PY2 and isinstance(key, six.text_type):
new_dict[str(key)] = a_dict[key]
else:
new_dict[key] = a_dict[key]
return new_dict
#------------------------------------------------------------------------------
def str_quote_stripper(input_str):
if not isinstance(input_str, six.string_types):
raise ValueError(input_str)
while (
input_str
and input_str[0] == input_str[-1]
and input_str[0] in ("'", '"')
):
input_str = input_str.strip(input_str[0])
return input_str
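# Illustrative behavior:
# str_quote_stripper('"hello"')   # -> 'hello'
# str_quote_stripper("''spam''")  # -> 'spam' (repeated quote layers are stripped)
# str_quote_stripper(42)          # raises ValueError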
#------------------------------------------------------------------------------
# from string section
#
# a set of functions that will convert from a string representation into some
# specified type
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# a bunch of known mappings of builtin items to strings
import six.moves.builtins as builtins
known_mapping_str_to_type = dict(
(key, val) for key, val in sorted(builtins.__dict__.items())
if val not in (True, False)
)
#------------------------------------------------------------------------------
from configman.datetime_util import (
str_to_timedelta, # don't worry about pyflakes here, unused in this file
timedelta_to_str, # but used elsewhere
)
timedelta_converter = str_to_timedelta  # for backward compatibility
#------------------------------------------------------------------------------
def py2_to_unicode(input_str):
if six.PY2:
input_str = six.text_type(input_str, 'utf-8')
return input_str
def py3_to_bytes(input_str):
if six.PY3:
input_str = input_str.encode('utf-8')
return input_str
#------------------------------------------------------------------------------
def str_to_boolean(input_str):
""" a conversion function for boolean
"""
if not isinstance(input_str, six.string_types):
raise ValueError(input_str)
input_str = str_quote_stripper(input_str)
return input_str.lower() in ("true", "t", "1", "y", "yes")
boolean_converter = str_to_boolean  # for backward compatibility
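# Illustrative behavior -- anything outside the truthy set maps to False:
# str_to_boolean('True')   # -> True
# str_to_boolean('"yes"')  # -> True (surrounding quotes are stripped first)
# str_to_boolean('0')      # -> False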
#------------------------------------------------------------------------------
def str_to_python_object(input_str):
""" a conversion that will import a module and class name
"""
if not input_str:
return None
if six.PY3 and isinstance(input_str, six.binary_type):
input_str = to_str(input_str)
if not isinstance(input_str, six.string_types):
# gosh, we didn't get a string, we can't convert anything but strings
# we're going to assume that what we got is actually what was wanted
# as the output
return input_str
input_str = str_quote_stripper(input_str)
if '.' not in input_str and input_str in known_mapping_str_to_type:
return known_mapping_str_to_type[input_str]
parts = [x.strip() for x in input_str.split('.') if x.strip()]
try:
try:
# first try as a complete module
package = __import__(input_str)
except ImportError:
# it must be a class from a module
if len(parts) == 1:
# since it has only one part, it must be a class from __main__
parts = ('__main__', input_str)
package = __import__('.'.join(parts[:-1]), globals(), locals(), [])
obj = package
for name in parts[1:]:
obj = getattr(obj, name)
return obj
except AttributeError as x:
raise CannotConvertError("%s cannot be found" % input_str)
except ImportError as x:
raise CannotConvertError(str(x))
class_converter = str_to_python_object # for backward compatibility
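# Illustrative behavior:
# str_to_python_object('int')           # -> the builtin int type (known mapping)
# str_to_python_object('os.path.join')  # -> the os.path.join function
# str_to_python_object('no.such.thing') # raises CannotConvertError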
#------------------------------------------------------------------------------
def str_to_classes_in_namespaces(
template_for_namespace="cls%d",
name_of_class_option='cls',
instantiate_classes=False
):
"""take a comma delimited list of class names, convert each class name
into an actual class as an option within a numbered namespace. This
function creates a closure over a new function. That new function,
in turn creates a class derived from RequiredConfig. The inner function,
'class_list_converter', populates the InnerClassList with a Namespace for
each of the classes in the class list. In addition, it puts each class
itself into the subordinate Namespace. The requirement discovery mechanism
of configman then reads the InnerClassList's required config, pulling in
the namespaces and associated classes within.
For example, if we have a class list like this: "Alpha, Beta", then this
converter will add the following Namespaces and options to the
configuration:
"cls0" - the subordinate Namespace for Alpha
"cls0.cls" - the option containing the class Alpha itself
"cls1" - the subordinate Namespace for Beta
"cls1.cls" - the option containing the class Beta itself
Optionally, the 'class_list_converter' inner function can embue the
InnerClassList's subordinate namespaces with aggregates that will
instantiate classes from the class list. This is a convenience to the
programmer who would otherwise have to know ahead of time what the
namespace names were so that the classes could be instantiated within the
context of the correct namespace. Remember the user could completely
change the list of classes at run time, so prediction could be difficult.
"cls0" - the subordinate Namespace for Alpha
"cls0.cls" - the option containing the class Alpha itself
"cls0.cls_instance" - an instance of the class Alpha
"cls1" - the subordinate Namespace for Beta
"cls1.cls" - the option containing the class Beta itself
"cls1.cls_instance" - an instance of the class Beta
parameters:
template_for_namespace - a template for the names of the namespaces
that will contain the classes and their
associated required config options. The
namespaces will be numbered sequentially. By
default, they will be "cls0", "cls1", etc.
name_of_class_option - the name to be used for the class option within
the nested namespace. By default, it will choose:
"cls0.cls", "cls1.cls", etc.
instantiate_classes - a boolean to determine if there should be an
aggregator added to each namespace that
instantiates each class. If True, then each
Namespace will contain elements for the class, as
well as an aggregator that will instantiate the
class.
"""
# these are only used within this method. No need to pollute the module
# scope with them, and importing here avoids potential circular imports
from configman.namespace import Namespace
from configman.required_config import RequiredConfig
#--------------------------------------------------------------------------
def class_list_converter(class_list_str):
"""This function becomes the actual converter used by configman to
take a string and convert it into the nested sequence of Namespaces,
one for each class in the list. It does this by creating a proxy
class stuffed with its own 'required_config' that's dynamically
generated."""
if isinstance(class_list_str, six.string_types):
class_list = [x.strip() for x in class_list_str.split(',')]
if class_list == ['']:
class_list = []
else:
raise TypeError('must be derivative of %s' % six.string_types)
#======================================================================
class InnerClassList(RequiredConfig):
"""This nested class is a proxy list for the classes. It collects
all the config requirements for the listed classes and places them
each into their own Namespace.
"""
# we're dynamically creating a class here. The following block of
# code is actually adding class level attributes to this new class
required_config = Namespace() # 1st requirement for configman
subordinate_namespace_names = [] # to help the programmer know
# what Namespaces we added
namespace_template = template_for_namespace # save the template
# for future reference
class_option_name = name_of_class_option # save the class's option
# name for the future
# for each class in the class list
for namespace_index, a_class in enumerate(class_list):
# figure out the Namespace name
namespace_name = template_for_namespace % namespace_index
subordinate_namespace_names.append(namespace_name)
# create the new Namespace
required_config[namespace_name] = Namespace()
# add the option for the class itself
required_config[namespace_name].add_option(
name_of_class_option,
#doc=a_class.__doc__ # not helpful if too verbose
default=a_class,
from_string_converter=class_converter
)
if instantiate_classes:
# add an aggregator to instantiate the class
required_config[namespace_name].add_aggregation(
"%s_instance" % name_of_class_option,
lambda c, lc, a: lc[name_of_class_option](lc)
)
@classmethod
def to_str(cls):
"""this method takes this inner class object and turns it back
into the original string of class names. This is used
primarily for the output of the 'help' option"""
return ', '.join(
py_obj_to_str(v[name_of_class_option].value)
for v in cls.get_required_config().values()
if isinstance(v, Namespace)
)
return InnerClassList # result of class_list_converter
return class_list_converter # result of classes_in_namespaces_converter
# for backward compatibility
classes_in_namespaces_converter = str_to_classes_in_namespaces
#------------------------------------------------------------------------------
def str_to_regular_expression(input_str):
return re.compile(input_str)
regex_converter = str_to_regular_expression # for backward compatibility
compiled_regexp_type = type(re.compile(r'x'))
#------------------------------------------------------------------------------
def str_to_list(
input_str,
item_converter=lambda x: x,
item_separator=',',
list_to_collection_converter=None,
):
""" a conversion function for list
"""
if not isinstance(input_str, six.string_types):
raise ValueError(input_str)
input_str = str_quote_stripper(input_str)
result = [
item_converter(x.strip())
for x in input_str.split(item_separator) if x.strip()
]
if list_to_collection_converter is not None:
return list_to_collection_converter(result)
return result
list_converter = str_to_list # for backward compatibility
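# Illustrative behavior:
# str_to_list('a, b, c')                                        # -> ['a', 'b', 'c']
# str_to_list('1|2|3', item_converter=int, item_separator='|')  # -> [1, 2, 3]
# str_to_list('a,b', list_to_collection_converter=tuple)        # -> ('a', 'b')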
#------------------------------------------------------------------------------
#
# To string section
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
str_to_instance_of_type_converters = {
int: int,
float: float,
str: str,
bool: boolean_converter,
dict: json.loads,
list: list_converter,
datetime.datetime: datetime_converter,
datetime.date: date_converter,
datetime.timedelta: timedelta_converter,
type: class_converter,
types.FunctionType: class_converter,
compiled_regexp_type: regex_converter,
}
if six.PY2:
str_to_instance_of_type_converters[six.text_type] = py2_to_unicode
if six.PY3:
str_to_instance_of_type_converters[six.binary_type] = py3_to_bytes
# backward compatibility
from_string_converters = str_to_instance_of_type_converters
#------------------------------------------------------------------------------
def arbitrary_object_to_string(a_thing):
"""take a python object of some sort, and convert it into a human readable
string. this function is used extensively to convert things like "subject"
into "subject_key, function -> function_key, etc."""
# is it None?
if a_thing is None:
return ''
# is it already a string?
if isinstance(a_thing, six.string_types):
return a_thing
if six.PY3 and isinstance(a_thing, six.binary_type):
try:
return a_thing.decode('utf-8')
except UnicodeDecodeError:
pass
# does it have a to_str function?
try:
return a_thing.to_str()
except (AttributeError, KeyError, TypeError):
# AttributeError - no to_str function?
# KeyError - DotDict has no to_str?
# TypeError - problem converting
# nope, no to_str function
pass
# is this a type proxy?
try:
return arbitrary_object_to_string(a_thing.a_type)
except (AttributeError, KeyError, TypeError):
        # nope, no a_type property
pass
# is it a built in?
try:
return known_mapping_type_to_str[a_thing]
except (KeyError, TypeError):
# nope, not a builtin
pass
# is it something from a loaded module?
try:
if a_thing.__module__ not in ('__builtin__', 'builtins', 'exceptions'):
if a_thing.__module__ == "__main__":
module_name = (
sys.modules['__main__']
.__file__[:-3]
.replace('/', '.')
.strip('.')
)
else:
module_name = a_thing.__module__
return "%s.%s" % (module_name, a_thing.__name__)
except AttributeError:
# nope, not one of these
pass
# maybe it has a __name__ attribute?
try:
return a_thing.__name__
except AttributeError:
# nope, not one of these
pass
# punt and see what happens if we just cast it to string
return str(a_thing)
py_obj_to_str = arbitrary_object_to_string # for backwards compatibility
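# Illustrative behavior sketch (values assume a standard CPython runtime):
#     >>> arbitrary_object_to_string(None)
#     ''
#     >>> arbitrary_object_to_string(int)  # via known_mapping_type_to_str below
#     'int'
#     >>> arbitrary_object_to_string(datetime.datetime)
#     'datetime.datetime'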
#------------------------------------------------------------------------------
def list_to_str(a_list, delimiter=', '):
return delimiter.join(to_str(x) for x in a_list)
#------------------------------------------------------------------------------
def py2_to_str(a_unicode):
return six.text_type(a_unicode)
def py3_to_str(a_bytes):
return a_bytes.decode('utf-8')
#------------------------------------------------------------------------------
known_mapping_type_to_str = {}
for key, val in sorted(builtins.__dict__.items()):
if val not in (True, False, list, dict):
try:
known_mapping_type_to_str[val] = key
except TypeError:
pass
#------------------------------------------------------------------------------
to_string_converters = {
int: str,
float: str,
str: str,
list: list_to_str,
tuple: list_to_str,
bool: lambda x: 'True' if x else 'False',
dict: json.dumps,
datetime.datetime: datetime_to_ISO_string,
datetime.date: date_to_ISO_string,
datetime.timedelta: timedelta_to_str,
type: arbitrary_object_to_string,
types.ModuleType: arbitrary_object_to_string,
types.FunctionType: arbitrary_object_to_string,
compiled_regexp_type: lambda x: x.pattern,
}
if six.PY2:
to_string_converters[six.text_type] = py2_to_str
if six.PY3:
to_string_converters[six.binary_type] = py3_to_str
#------------------------------------------------------------------------------
def to_str(a_thing):
try:
return to_string_converters[type(a_thing)](a_thing)
except KeyError:
return arbitrary_object_to_string(a_thing)
#------------------------------------------------------------------------------
converters_requiring_quotes = [eval, regex_converter]
|
mpl-2.0
|
raoulbq/scipy
|
scipy/_lib/_numpy_compat.py
|
71
|
1488
|
"""Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib._version import NumpyVersion
if NumpyVersion(np.__version__) > '1.7.0.dev':
_assert_warns = np.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given callable throws the specified warning.
This definition is copypasted from numpy 1.9.0.dev.
The version in earlier numpy returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
            if l[0].category is not warning_class:
                raise AssertionError("First warning for %s is not a %s (is %s)"
                                     % (func.__name__, warning_class, l[0]))
return result
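# Illustrative usage sketch (the deprecated function below is hypothetical):
#     def _old_api():
#         warnings.warn("deprecated", DeprecationWarning)
#         return 42
#     assert _assert_warns(DeprecationWarning, _old_api) == 42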
|
bsd-3-clause
|
yohanko88/gem5-DC
|
util/batch/batch.py
|
90
|
7895
|
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
import os, popen2, re, sys
class MyPOpen(object):
def __init__(self, cmd, input = None, output = None, bufsize = -1):
self.status = -1
if input is None:
p2c_read, p2c_write = os.pipe()
self.tochild = os.fdopen(p2c_write, 'w', bufsize)
else:
p2c_write = None
if isinstance(input, file):
p2c_read = input.fileno()
elif isinstance(input, str):
input = file(input, 'r')
p2c_read = input.fileno()
elif isinstance(input, int):
p2c_read = input
else:
raise AttributeError
if output is None:
c2p_read, c2p_write = os.pipe()
self.fromchild = os.fdopen(c2p_read, 'r', bufsize)
else:
c2p_read = None
if isinstance(output, file):
c2p_write = output.fileno()
elif isinstance(output, str):
output = file(output, 'w')
c2p_write = output.fileno()
elif isinstance(output, int):
c2p_write = output
else:
raise AttributeError
self.pid = os.fork()
if self.pid == 0:
os.dup2(p2c_read, sys.stdin.fileno())
os.dup2(c2p_write, sys.stdout.fileno())
os.dup2(c2p_write, sys.stderr.fileno())
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
os.close(p2c_read)
os.close(c2p_write)
def poll(self):
if self.status < 0:
pid, status = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self.status = status
return self.status
def wait(self):
if self.status < 0:
pid, status = os.waitpid(self.pid, 0)
if pid == self.pid:
self.status = status
return self.status
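# Illustrative usage sketch (Python 2 era; the command is hypothetical):
#     proc = MyPOpen(['echo', 'hello'])
#     output = proc.fromchild.read()
#     status = proc.wait()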
class oarsub:
def __init__(self):
self.walltime = None
self.queue = None
self.properties = None
# OAR 2.0 parameters only!
self.name = None
self.afterok = None
self.notify = None
self.stderr = None
self.stdout = None
self.oarhost = None
self.oarsub = 'oarsub'
        self.jobid = re.compile(r'IdJob = (\S+)')
#self.outfile = open("jobnames.dat", "a+")
def build(self, script, args = []):
self.cmd = [ self.oarsub ]
print "args:", args
print "script:", script
if self.properties:
self.cmd.append('-p"%s"' % self.properties )
if self.queue:
self.cmd.append('-q "%s"' % self.queue)
if self.walltime:
self.cmd.append('-l walltime=%s' % self.walltime)
if script[0] != "/":
self.script = os.getcwd()
else:
self.script = script
self.cmd.extend(args)
self.cmd.append(self.script)
#cmd = [ 'ssh', '-x', self.oarhost, '"cd %s; %s"' % (os.getcwd(), self.command) ]
self.command = ' '.join(self.cmd)
print "command: [%s]" % self.command
def do(self):
oar = MyPOpen(self.cmd)
self.result = oar.fromchild.read()
ec = oar.wait()
if ec != 0 and self.oarhost:
pstdin, pstdout = os.popen4(self.command)
self.result = pstdout.read()
jobid = self.jobid.match(self.result)
if jobid == None:
print "Couldn't get jobid from [%s]" % self.result
sys.exit(1)
else:
#self.outfile.write("%d %s\n" %(int(jobid.group(1)), self.name));
#self.outfile.flush()
self.result = jobid.group(1)
return 0
class qsub:
def __init__(self):
self.afterok = None
self.hold = False
self.join = False
self.keep_stdout = False
self.keep_stderr = False
self.node_type = None
self.mail_abort = False
self.mail_begin = False
self.mail_end = False
self.name = None
self.stdout = None
self.priority = None
self.queue = None
self.pbshost = None
self.qsub = 'qsub'
self.env = {}
def build(self, script, args = []):
self.cmd = [ self.qsub ]
if self.env:
arg = '-v'
arg += ','.join([ '%s=%s' % i for i in self.env.iteritems() ])
self.cmd.append(arg)
if self.hold:
self.cmd.append('-h')
if self.stdout:
self.cmd.append('-olocalhost:' + self.stdout)
if self.keep_stdout and self.keep_stderr:
self.cmd.append('-koe')
elif self.keep_stdout:
self.cmd.append('-ko')
elif self.keep_stderr:
self.cmd.append('-ke')
else:
self.cmd.append('-kn')
if self.join:
self.cmd.append('-joe')
if self.node_type:
self.cmd.append('-lnodes=' + self.node_type)
if self.mail_abort or self.mail_begin or self.mail_end:
            flags = ''
            if self.mail_abort:
                flags += 'a'  # str has no append(); build the flags by concatenation
            if self.mail_begin:
                flags += 'b'
            if self.mail_end:
                flags += 'e'
if len(flags):
self.cmd.append('-m ' + flags)
else:
self.cmd.append('-mn')
if self.name:
self.cmd.append("-N%s" % self.name)
if self.priority:
self.cmd.append('-p' + self.priority)
if self.queue:
self.cmd.append('-q' + self.queue)
if self.afterok:
self.cmd.append('-Wdepend=afterok:%s' % self.afterok)
self.cmd.extend(args)
self.script = script
self.command = ' '.join(self.cmd + [ self.script ])
def do(self):
pbs = MyPOpen(self.cmd + [ self.script ])
self.result = pbs.fromchild.read()
ec = pbs.wait()
if ec != 0 and self.pbshost:
cmd = ' '.join(self.cmd + [ '-' ])
cmd = [ 'ssh', '-x', self.pbshost, cmd ]
self.command = ' '.join(cmd)
ssh = MyPOpen(cmd, input = self.script)
self.result = ssh.fromchild.read()
ec = ssh.wait()
return ec
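# Illustrative sketch of building a submission command line (values are
# hypothetical; nothing is actually submitted until do() is called):
#     q = qsub()
#     q.name = 'myjob'
#     q.queue = 'batch'
#     q.build('/path/to/job.sh')
#     print q.command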
|
bsd-3-clause
|
adiwgno/tes
|
Documentation/networking/cxacru-cf.py
|
14668
|
1626
|
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
|
gpl-2.0
|
vmarkovtsev/django
|
tests/view_tests/tests/test_csrf.py
|
253
|
3203
|
from django.test import Client, SimpleTestCase, override_settings
from django.utils.translation import override
@override_settings(ROOT_URLCONF="view_tests.urls")
class CsrfViewTests(SimpleTestCase):
def setUp(self):
super(CsrfViewTests, self).setUp()
self.client = Client(enforce_csrf_checks=True)
@override_settings(
USE_I18N=True,
MIDDLEWARE_CLASSES=[
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
],
)
def test_translation(self):
"""
Test that an invalid request is rejected with a localized error message.
"""
response = self.client.post('/')
self.assertContains(response, "Forbidden", status_code=403)
self.assertContains(response,
"CSRF verification failed. Request aborted.",
status_code=403)
with self.settings(LANGUAGE_CODE='nl'), override('en-us'):
response = self.client.post('/')
self.assertContains(response, "Verboden", status_code=403)
self.assertContains(response,
"CSRF-verificatie mislukt. Verzoek afgebroken.",
status_code=403)
@override_settings(
SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')
)
def test_no_referer(self):
"""
Referer header is strictly checked for POST over HTTPS. Trigger the
exception by sending an incorrect referer.
"""
response = self.client.post('/', HTTP_X_FORWARDED_PROTO='https')
self.assertContains(response,
"You are seeing this message because this HTTPS "
"site requires a 'Referer header' to be "
"sent by your Web browser, but none was sent.",
status_code=403)
def test_no_cookies(self):
"""
The CSRF cookie is checked for POST. Failure to send this cookie should
provide a nice error message.
"""
response = self.client.post('/')
self.assertContains(response,
"You are seeing this message because this site "
"requires a CSRF cookie when submitting forms. "
"This cookie is required for security reasons, to "
"ensure that your browser is not being hijacked "
"by third parties.",
status_code=403)
# In Django 1.10, this can be changed to TEMPLATES=[] because the code path
# that reads the TEMPLATE_* settings in that case will have been removed.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}])
def test_no_django_template_engine(self):
"""
The CSRF view doesn't depend on the TEMPLATES configuration (#24388).
"""
response = self.client.post('/')
self.assertContains(response, "Forbidden", status_code=403)
|
bsd-3-clause
|
dudonwai/dudonsblog
|
Lib/site-packages/django/contrib/gis/gdal/envelope.py
|
477
|
7009
|
"""
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
+----------o Upper right; (max_x, max_y)
| |
| |
| |
Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import GDALException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
"Represents the OGREnvelope C Structure."
_fields_ = [("MinX", c_double),
("MaxX", c_double),
("MinY", c_double),
("MaxY", c_double),
]
class Envelope(object):
"""
The Envelope object is a C structure that contains the minimum and
maximum X, Y coordinates for a rectangle bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
self._from_sequence(args[0])
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 4:
# Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence([float(a) for a in args])
else:
raise GDALException('Incorrect number (%d) of arguments.' % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise GDALException('Envelope minimum X > maximum X.')
if self.min_y > self.max_y:
raise GDALException('Envelope minimum Y > maximum Y.')
def __eq__(self, other):
"""
Returns True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
(self.max_x == other.max_x) and (self.max_y == other.max_y)
elif isinstance(other, tuple) and len(other) == 4:
return (self.min_x == other[0]) and (self.min_y == other[1]) and \
(self.max_x == other[2]) and (self.max_y == other[3])
else:
raise GDALException('Equivalence testing only works with other Envelopes.')
def __str__(self):
"Returns a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initializes the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modifies the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
# Individual parameters passed in.
return self.expand_to_include(args)
else:
            raise GDALException('Incorrect number (%d) of arguments.' % len(args))
@property
def min_x(self):
"Returns the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Returns the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Returns the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Returns the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Returns the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Returns the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Returns a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Returns WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
(self.min_x, self.min_y, self.min_x, self.max_y,
self.max_x, self.max_y, self.max_x, self.min_y,
self.min_x, self.min_y)
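# Illustrative usage sketch (pure ctypes; no GDAL C library calls are made):
#     >>> env = Envelope(0.0, 0.0, 5.0, 5.0)
#     >>> env.expand_to_include(10.0, 10.0)
#     >>> env.tuple
#     (0.0, 0.0, 10.0, 10.0)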
|
mit
|
simod/geonode
|
geonode/services/enumerations.py
|
7
|
1968
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_lazy as _
AUTO = "AUTO"
OWS = "OWS"
WMS = "WMS"
WFS = "WFS"
TMS = "TMS"
CSW = "CSW"
REST_MAP = "REST_MAP"
REST_IMG = "REST_IMG"
OGP = "OGP"
HGL = "HGL"
GN_WMS = "GN_WMS"
GN_CSW = "GN_CSW"
LOCAL = "L"
CASCADED = "C"
HARVESTED = "H"
INDEXED = "I"
LIVE = "X"
OPENGEOPORTAL = "O"
SERVICE_TYPES = (
(AUTO, _('Auto-detect')),
(OWS, _('Paired WMS/WFS/WCS')),
(WMS, _('Web Map Service')),
(CSW, _('Catalogue Service')),
(REST_MAP, _('ArcGIS REST MapServer')),
(REST_IMG, _('ArcGIS REST ImageServer')),
(OGP, _('OpenGeoPortal')),
(HGL, _('Harvard Geospatial Library')),
(GN_WMS, _('GeoNode (Web Map Service)')),
(GN_CSW, _('GeoNode (Catalogue Service)')),
)
GXP_PTYPES = {
'OWS': 'gxp_wmscsource',
'WMS': 'gxp_wmscsource',
'WFS': 'gxp_wmscsource',
'WCS': 'gxp_wmscsource',
'REST_MAP': 'gxp_arcrestsource',
'REST_IMG': 'gxp_arcrestsource',
'HGL': 'gxp_hglsource',
'GN_WMS': 'gxp_geonodecataloguesource',
}
QUEUED = "QUEUED"
IN_PROCESS = "IN_PROCESS"
PROCESSED = "PROCESSED"
FAILED = "FAILED"
CANCELLED = "CANCELLED"
|
gpl-3.0
|
janusnic/dj-21v
|
unit_14/mysite/userprofiles/utils.py
|
6
|
2613
|
# -*- coding: utf-8 -*-
import functools
try:
    import urlparse
except ImportError:
    from urllib import parse as urlparse  # python3 support
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
def default_redirect(request, fallback_url, **kwargs):
"""
Evaluates a redirect url by consulting GET, POST and the session.
"""
redirect_field_name = kwargs.get("redirect_field_name", "next")
next = request.REQUEST.get(redirect_field_name)
if not next:
# try the session if available
if hasattr(request, "session"):
session_key_value = kwargs.get("session_key_value", "redirect_to")
next = request.session.get(session_key_value)
is_safe = functools.partial(
ensure_safe_url,
allowed_protocols=kwargs.get("allowed_protocols"),
allowed_host=request.get_host()
)
redirect_to = next if next and is_safe(next) else fallback_url
    # Perform one last check to ensure the URL is safe to redirect to. If it
    # is not, bail out here: that is most likely a developer error, and the
    # developer should be notified.
is_safe(redirect_to, raise_on_fail=True)
return redirect_to
def ensure_safe_url(url, allowed_protocols=None, allowed_host=None, raise_on_fail=False):
if allowed_protocols is None:
allowed_protocols = ["http", "https"]
parsed = urlparse.urlparse(url)
# perform security checks to ensure no malicious intent
# (i.e., an XSS attack with a data URL)
safe = True
if parsed.scheme and parsed.scheme not in allowed_protocols:
if raise_on_fail:
raise SuspiciousOperation("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
safe = False
if allowed_host and parsed.netloc and parsed.netloc != allowed_host:
if raise_on_fail:
raise SuspiciousOperation("Unsafe redirect to URL not matching host '%s'" % allowed_host)
safe = False
return safe
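# Illustrative behavior sketch:
#     >>> ensure_safe_url("https://example.com/next", allowed_host="example.com")
#     True
#     >>> ensure_safe_url("javascript:alert(1)")
#     False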
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
def get_form_class(path):
i = path.rfind('.')
module, attr = path[:i], path[i + 1:]
try:
mod = import_module(module)
# except ImportError, e: # python 2.7
except ImportError as e: # python 3.4
raise ImproperlyConfigured( 'Error loading module %s: "%s"' % (module, e))
try:
form = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a form named "%s"' % (module, attr))
return form
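# Illustrative usage sketch (any importable dotted path to a form works; this
# particular path is just an example):
#     form_class = get_form_class('django.contrib.auth.forms.AuthenticationForm')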
|
mit
|
quinonez/three.js
|
utils/converters/msgpack/msgpack/fallback.py
|
641
|
26403
|
"""Fallback pure Python implementation of msgpack"""
import sys
import array
import struct
if sys.version_info[0] == 3:
PY3 = True
int_types = int
Unicode = str
xrange = range
def dict_iteritems(d):
return d.items()
else:
PY3 = False
int_types = (int, long)
Unicode = unicode
def dict_iteritems(d):
return d.iteritems()
if hasattr(sys, 'pypy_version_info'):
# cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
# StringBuilder is fastest.
from __pypy__ import newlist_hint
from __pypy__.builders import StringBuilder
USING_STRINGBUILDER = True
class StringIO(object):
def __init__(self, s=b''):
if s:
self.builder = StringBuilder(len(s))
self.builder.append(s)
else:
self.builder = StringBuilder()
def write(self, s):
self.builder.append(s)
def getvalue(self):
return self.builder.build()
else:
USING_STRINGBUILDER = False
from io import BytesIO as StringIO
newlist_hint = lambda size: []
from msgpack.exceptions import (
BufferFull,
OutOfData,
UnpackValueError,
PackValueError,
ExtraData)
from msgpack import ExtType
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
def unpack(stream, **kwargs):
"""
Unpack an object from `stream`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(stream, **kwargs)
ret = unpacker._fb_unpack()
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises `ExtraData` when `packed` contains extra bytes.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._fb_unpack()
except OutOfData:
raise UnpackValueError("Data is not enough.")
if unpacker._fb_got_extradata():
raise ExtraData(ret, unpacker._fb_get_extradata())
return ret
class Unpacker(object):
"""
Streaming unpacker.
`file_like` is a file-like object having a `.read(n)` method.
When `Unpacker` is initialized with a `file_like`, `.feed()` is not
usable.
`read_size` is used for `file_like.read(read_size)`.
If `use_list` is True (default), msgpack lists are deserialized to Python
lists. Otherwise they are deserialized to tuples.
`object_hook` is the same as in simplejson. If it is not None, it should
be callable and Unpacker calls it with a dict argument after deserializing
a map.
`object_pairs_hook` is the same as in simplejson. If it is not None, it
should be callable and Unpacker calls it with a list of key-value pairs
after deserializing a map.
    `ext_hook` is a callback for ext (user defined) types. It is called with
    two arguments: (code, bytes). default: `msgpack.ExtType`
    `encoding` is the encoding used for decoding msgpack bytes. If it is
    None (default), msgpack bytes are deserialized to Python bytes.
    `unicode_errors` is used for decoding bytes.
    `max_buffer_size` limits the buffer size. 0 means INT_MAX (default).
    Raises `BufferFull` exception when it is insufficient.
    You should set this parameter when unpacking data from an untrusted source.
example of streaming deserialization from file-like object::
unpacker = Unpacker(file_like)
for o in unpacker:
do_something(o)
example of streaming deserialization from socket::
unpacker = Unpacker()
while 1:
buf = sock.recv(1024*2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
do_something(o)
"""
def __init__(self, file_like=None, read_size=0, use_list=True,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors='strict', max_buffer_size=0,
ext_hook=ExtType):
if file_like is None:
self._fb_feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._fb_feeding = False
self._fb_buffers = []
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = 0
self._max_buffer_size = max_buffer_size or 2**31-1
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 2048)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
if list_hook is not None and not callable(list_hook):
raise TypeError('`list_hook` is not callable')
if object_hook is not None and not callable(object_hook):
raise TypeError('`object_hook` is not callable')
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError('`object_pairs_hook` is not callable')
if object_hook is not None and object_pairs_hook is not None:
raise TypeError("object_pairs_hook and object_hook are mutually "
"exclusive")
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
if isinstance(next_bytes, array.array):
next_bytes = next_bytes.tostring()
elif isinstance(next_bytes, bytearray):
next_bytes = bytes(next_bytes)
assert self._fb_feeding
if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
raise BufferFull
self._fb_buf_n += len(next_bytes)
self._fb_buffers.append(next_bytes)
def _fb_consume(self):
self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
if self._fb_buffers:
self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
self._fb_buf_o = 0
self._fb_buf_i = 0
self._fb_buf_n = sum(map(len, self._fb_buffers))
def _fb_got_extradata(self):
if self._fb_buf_i != len(self._fb_buffers):
return True
if self._fb_feeding:
return False
if not self.file_like:
return False
if self.file_like.read(1):
return True
return False
def __iter__(self):
return self
def read_bytes(self, n):
return self._fb_read(n)
def _fb_rollback(self):
self._fb_buf_i = 0
self._fb_buf_o = 0
def _fb_get_extradata(self):
bufs = self._fb_buffers[self._fb_buf_i:]
if bufs:
bufs[0] = bufs[0][self._fb_buf_o:]
return b''.join(bufs)
def _fb_read(self, n, write_bytes=None):
buffs = self._fb_buffers
if (write_bytes is None and self._fb_buf_i < len(buffs) and
self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
self._fb_buf_o += n
return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
ret = b''
while len(ret) != n:
if self._fb_buf_i == len(buffs):
if self._fb_feeding:
break
tmp = self.file_like.read(self._read_size)
if not tmp:
break
buffs.append(tmp)
continue
sliced = n - len(ret)
ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
self._fb_buf_o += sliced
if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
self._fb_buf_o = 0
self._fb_buf_i += 1
if len(ret) != n:
self._fb_rollback()
raise OutOfData
if write_bytes is not None:
write_bytes(ret)
return ret
def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
typ = TYPE_IMMEDIATE
n = 0
obj = None
c = self._fb_read(1, write_bytes)
b = ord(c)
if b & 0b10000000 == 0:
obj = b
elif b & 0b11100000 == 0b11100000:
obj = struct.unpack("b", c)[0]
elif b & 0b11100000 == 0b10100000:
n = b & 0b00011111
obj = self._fb_read(n, write_bytes)
typ = TYPE_RAW
elif b & 0b11110000 == 0b10010000:
n = b & 0b00001111
typ = TYPE_ARRAY
elif b & 0b11110000 == 0b10000000:
n = b & 0b00001111
typ = TYPE_MAP
elif b == 0xc0:
obj = None
elif b == 0xc2:
obj = False
elif b == 0xc3:
obj = True
elif b == 0xc4:
typ = TYPE_BIN
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc5:
typ = TYPE_BIN
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc6:
typ = TYPE_BIN
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xc7: # ext 8
typ = TYPE_EXT
L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc8: # ext 16
typ = TYPE_EXT
L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xc9: # ext 32
typ = TYPE_EXT
L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
obj = self._fb_read(L, write_bytes)
elif b == 0xca:
obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
elif b == 0xcb:
obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
elif b == 0xcc:
obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
elif b == 0xcd:
obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
elif b == 0xce:
obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
elif b == 0xcf:
obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
elif b == 0xd0:
obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
elif b == 0xd1:
obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
elif b == 0xd2:
obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
elif b == 0xd3:
obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
elif b == 0xd4: # fixext 1
typ = TYPE_EXT
n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
elif b == 0xd5: # fixext 2
typ = TYPE_EXT
n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
elif b == 0xd6: # fixext 4
typ = TYPE_EXT
n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
elif b == 0xd7: # fixext 8
typ = TYPE_EXT
n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
elif b == 0xd8: # fixext 16
typ = TYPE_EXT
n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
elif b == 0xd9:
typ = TYPE_RAW
n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xda:
typ = TYPE_RAW
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdb:
typ = TYPE_RAW
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
obj = self._fb_read(n, write_bytes)
elif b == 0xdc:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xdd:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_ARRAY
elif b == 0xde:
n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
typ = TYPE_MAP
elif b == 0xdf:
n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
typ = TYPE_MAP
else:
raise UnpackValueError("Unknown header: 0x%x" % b)
return typ, n, obj
def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
typ, n, obj = self._read_header(execute, write_bytes)
if execute == EX_READ_ARRAY_HEADER:
if typ != TYPE_ARRAY:
raise UnpackValueError("Expected array")
return n
if execute == EX_READ_MAP_HEADER:
if typ != TYPE_MAP:
raise UnpackValueError("Expected map")
return n
# TODO should we eliminate the recursion?
if typ == TYPE_ARRAY:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call `list_hook`
self._fb_unpack(EX_SKIP, write_bytes)
return
ret = newlist_hint(n)
for i in xrange(n):
ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
if self._list_hook is not None:
ret = self._list_hook(ret)
# TODO is the interaction between `list_hook` and `use_list` ok?
return ret if self._use_list else tuple(ret)
if typ == TYPE_MAP:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call hooks
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_unpack(EX_SKIP, write_bytes)
return
if self._object_pairs_hook is not None:
ret = self._object_pairs_hook(
(self._fb_unpack(EX_CONSTRUCT, write_bytes),
self._fb_unpack(EX_CONSTRUCT, write_bytes))
for _ in xrange(n))
else:
ret = {}
for _ in xrange(n):
key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
if self._object_hook is not None:
ret = self._object_hook(ret)
return ret
if execute == EX_SKIP:
return
if typ == TYPE_RAW:
if self._encoding is not None:
obj = obj.decode(self._encoding, self._unicode_errors)
return obj
if typ == TYPE_EXT:
return self._ext_hook(n, obj)
if typ == TYPE_BIN:
return obj
assert typ == TYPE_IMMEDIATE
return obj
def next(self):
try:
ret = self._fb_unpack(EX_CONSTRUCT, None)
self._fb_consume()
return ret
except OutOfData:
raise StopIteration
__next__ = next
def skip(self, write_bytes=None):
self._fb_unpack(EX_SKIP, write_bytes)
self._fb_consume()
def unpack(self, write_bytes=None):
ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
self._fb_consume()
return ret
def read_array_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
self._fb_consume()
return ret
def read_map_header(self, write_bytes=None):
ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
self._fb_consume()
return ret
class Packer(object):
"""
MessagePack Packer
usage:
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param callable default:
        Convert user types to builtin types that Packer supports.
        See also simplejson's documentation.
:param str encoding:
Convert unicode to bytes with this encoding. (default: 'utf-8')
:param str unicode_errors:
Error handler for encoding unicode. (default: 'strict')
:param bool use_single_float:
Use single precision float type for float. (default: False)
    :param bool autoreset:
        Reset buffer after each pack and return its content as `bytes`. (default: True).
        If set to false, use `bytes()` to get content and `.reset()` to clear buffer.
    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enables str8 type for unicode.
"""
def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
use_single_float=False, autoreset=True, use_bin_type=False):
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._encoding = encoding
self._unicode_errors = unicode_errors
self._buffer = StringIO()
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
default_used = False
while True:
if nest_limit < 0:
raise PackValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if isinstance(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if isinstance(obj, int_types):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xff:
return self._buffer.write(struct.pack("BB", 0xcc, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
if 0xff < obj <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xcd, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
if 0xffff < obj <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xce, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
if 0xffffffff < obj <= 0xffffffffffffffff:
return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
raise PackValueError("Integer value out of range")
if self._use_bin_type and isinstance(obj, bytes):
n = len(obj)
if n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xc4, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc5, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xc6, n))
else:
raise PackValueError("Bytes is too large")
return self._buffer.write(obj)
if isinstance(obj, (Unicode, bytes)):
if isinstance(obj, Unicode):
if self._encoding is None:
raise TypeError(
"Can't encode unicode string: "
"no encoding is specified")
obj = obj.encode(self._encoding, self._unicode_errors)
n = len(obj)
if n <= 0x1f:
self._buffer.write(struct.pack('B', 0xa0 + n))
elif self._use_bin_type and n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xd9, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xda, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xdb, n))
else:
raise PackValueError("String is too large")
return self._buffer.write(obj)
if isinstance(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xca, obj))
return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
if isinstance(obj, ExtType):
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(struct.pack(">BB", 0xc7, L))
elif L <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc8, L))
else:
self._buffer.write(struct.pack(">BI", 0xc9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if isinstance(obj, (list, tuple)):
n = len(obj)
self._fb_pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
if isinstance(obj, dict):
return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
nest_limit - 1)
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = 1
continue
raise TypeError("Cannot serialize %r" % obj)
def pack(self, obj):
self._pack(obj)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_pairs(self, pairs):
self._fb_pack_map_pairs(len(pairs), pairs)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_array_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_array_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_map_header(self, n):
if n >= 2**32:
raise ValueError
self._fb_pack_map_header(n)
ret = self._buffer.getvalue()
if self._autoreset:
self._buffer = StringIO()
elif USING_STRINGBUILDER:
self._buffer = StringIO(ret)
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xffffffff:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(b'\xc7' + struct.pack('B', L))
elif L <= 0xffff:
self._buffer.write(b'\xc8' + struct.pack('>H', L))
else:
self._buffer.write(b'\xc9' + struct.pack('>I', L))
self._buffer.write(struct.pack('B', typecode))
self._buffer.write(data)
def _fb_pack_array_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x90 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xdc, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdd, n))
raise PackValueError("Array is too large")
def _fb_pack_map_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x80 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xde, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdf, n))
raise PackValueError("Dict is too large")
def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._fb_pack_map_header(n)
for (k, v) in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def bytes(self):
return self._buffer.getvalue()
def reset(self):
self._buffer = StringIO()
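# Illustrative round-trip sketch using this fallback implementation:
#     packer = Packer(use_bin_type=True)
#     payload = packer.pack({u'answer': 42, u'values': [1.5, b'raw']})
#     unpackb(payload, encoding='utf-8')
#     # -> {u'answer': 42, u'values': [1.5, b'raw']}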
|
mit
|
jeffery-do/Vizdoombot
|
doom/lib/python3.5/site-packages/scipy/ndimage/io.py
|
47
|
1096
|
from __future__ import division, print_function, absolute_import
_have_pil = True
try:
from scipy.misc.pilutil import imread as _imread
except ImportError:
_have_pil = False
__all__ = ['imread']
# Use the implementation of `imread` in `scipy.misc.pilutil.imread`.
# If it weren't for the different names of the first arguments of
# ndimage.io.imread and misc.pilutil.imread, we could simplify this file
# by writing
# from scipy.misc.pilutil import imread
# Unfortunately, because the argument names are different, that
# introduces a backwards incompatibility.
def imread(fname, flatten=False, mode=None):
if _have_pil:
return _imread(fname, flatten, mode)
raise ImportError("Could not import the Python Imaging Library (PIL)"
" required to load image files. Please refer to"
" http://pillow.readthedocs.org/en/latest/installation.html"
" for installation instructions.")
if _have_pil and _imread.__doc__ is not None:
imread.__doc__ = _imread.__doc__.replace('name : str', 'fname : str')
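# Illustrative usage sketch (requires PIL/Pillow; the file name is hypothetical):
#     img = imread('photo.png', flatten=True)  # 2-D greyscale ndarray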
|
mit
|
keedio/hue
|
desktop/core/ext-py/Django-1.6.10/tests/model_inheritance/tests.py
|
40
|
13501
|
from __future__ import absolute_import, unicode_literals
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection
from django.test import TestCase
from django.test.utils import CaptureQueriesContext
from django.utils import six
from .models import (Chef, CommonInfo, ItalianRestaurant, ParkingLot, Place,
Post, Restaurant, Student, StudentWorker, Supplier, Worker, MixinModel)
class ModelInheritanceTests(TestCase):
def test_abstract(self):
# The Student and Worker models both have 'name' and 'age' fields on
# them and inherit the __unicode__() method, just as with normal Python
# subclassing. This is useful if you want to factor out common
# information for programming purposes, but still completely
# independent separate models at the database level.
w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
w2 = Worker.objects.create(name="Barney", age=34, job="Quarry worker")
s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
self.assertEqual(six.text_type(w1), "Worker Fred")
self.assertEqual(six.text_type(s), "Student Pebbles")
# The children inherit the Meta class of their parents (if they don't
# specify their own).
self.assertQuerysetEqual(
Worker.objects.values("name"), [
{"name": "Barney"},
{"name": "Fred"},
],
lambda o: o
)
# Since Student does not subclass CommonInfo's Meta, it has the effect
# of completely overriding it. So ordering by name doesn't take place
# for Students.
self.assertEqual(Student._meta.ordering, [])
# However, the CommonInfo class cannot be used as a normal model (it
# doesn't exist as a model).
self.assertRaises(AttributeError, lambda: CommonInfo.objects.all())
# A StudentWorker which does not exist is both a Student and Worker
# which does not exist.
self.assertRaises(Student.DoesNotExist,
StudentWorker.objects.get, pk=12321321
)
self.assertRaises(Worker.DoesNotExist,
StudentWorker.objects.get, pk=12321321
)
# MultipleObjectsReturned is also inherited.
# This is written out "long form", rather than using __init__/create()
# because of a bug with diamond inheritance (#10808)
sw1 = StudentWorker()
sw1.name = "Wilma"
sw1.age = 35
sw1.save()
sw2 = StudentWorker()
sw2.name = "Betty"
sw2.age = 24
sw2.save()
self.assertRaises(Student.MultipleObjectsReturned,
StudentWorker.objects.get, pk__lt=sw2.pk + 100
)
self.assertRaises(Worker.MultipleObjectsReturned,
StudentWorker.objects.get, pk__lt=sw2.pk + 100
)
def test_multiple_table(self):
post = Post.objects.create(title="Lorem Ipsum")
# The Post model has distinct accessors for the Comment and Link models.
post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
post.attached_link_set.create(
content="The Web framework for perfections with deadlines.",
url="http://www.djangoproject.com/"
)
# The Post model doesn't have an attribute called
# 'attached_%(class)s_set'.
self.assertRaises(AttributeError,
getattr, post, "attached_%(class)s_set"
)
# The Place/Restaurant/ItalianRestaurant models all exist as
# independent models. However, the subclasses also have transparent
# access to the fields of their ancestors.
# Create a couple of Places.
p1 = Place.objects.create(name="Master Shakes", address="666 W. Jersey")
p2 = Place.objects.create(name="Ace Harware", address="1013 N. Ashland")
# Test constructor for Restaurant.
r = Restaurant.objects.create(
name="Demon Dogs",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
rating=2
)
# Test the constructor for ItalianRestaurant.
c = Chef.objects.create(name="Albert")
ir = ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
chef=c
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
"Ristorante Miron",
],
attrgetter("name")
)
ir.address = "1234 W. Elm"
ir.save()
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
"Ristorante Miron",
],
attrgetter("name")
)
# Make sure Restaurant and ItalianRestaurant have the right fields in
# the right order.
self.assertEqual(
[f.name for f in Restaurant._meta.fields],
["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef"]
)
self.assertEqual(
[f.name for f in ItalianRestaurant._meta.fields],
["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"],
)
self.assertEqual(Restaurant._meta.ordering, ["-rating"])
        # Even though a Place 'p' that is the parent of a Supplier exposes a
        # reverse relation p.supplier, a Restaurant object cannot access that
        # reverse relation, since it's not part of the Place-Supplier hierarchy.
self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
self.assertRaises(FieldError,
Restaurant.objects.filter, supplier__name="foo"
)
# Parent fields can be used directly in filters on the child model.
self.assertQuerysetEqual(
Restaurant.objects.filter(name="Demon Dogs"), [
"Demon Dogs",
],
attrgetter("name")
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
"Ristorante Miron",
],
attrgetter("name")
)
# Filters against the parent model return objects of the parent's type.
p = Place.objects.get(name="Demon Dogs")
self.assertIs(type(p), Place)
# Since the parent and child are linked by an automatically created
# OneToOneField, you can get from the parent to the child by using the
# child's name.
self.assertEqual(
p.restaurant, Restaurant.objects.get(name="Demon Dogs")
)
self.assertEqual(
Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
ItalianRestaurant.objects.get(name="Ristorante Miron")
)
self.assertEqual(
Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
ItalianRestaurant.objects.get(name="Ristorante Miron")
)
# This won't work because the Demon Dogs restaurant is not an Italian
# restaurant.
self.assertRaises(ItalianRestaurant.DoesNotExist,
lambda: p.restaurant.italianrestaurant
)
# An ItalianRestaurant which does not exist is also a Place which does
# not exist.
self.assertRaises(Place.DoesNotExist,
ItalianRestaurant.objects.get, name="The Noodle Void"
)
# MultipleObjectsReturned is also inherited.
self.assertRaises(Place.MultipleObjectsReturned,
Restaurant.objects.get, id__lt=12321
)
# Related objects work just as they normally do.
s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
s1.customers = [r, ir]
s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
s2.customers = [ir]
# This won't work because the Place we select is not a Restaurant (it's
# a Supplier).
p = Place.objects.get(name="Joe's Chickens")
self.assertRaises(Restaurant.DoesNotExist,
lambda: p.restaurant
)
self.assertEqual(p.supplier, s1)
self.assertQuerysetEqual(
ir.provider.order_by("-name"), [
"Luigi's Pasta",
"Joe's Chickens"
],
attrgetter("name")
)
self.assertQuerysetEqual(
Restaurant.objects.filter(provider__name__contains="Chickens"), [
"Ristorante Miron",
"Demon Dogs",
],
attrgetter("name")
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [
"Ristorante Miron",
],
attrgetter("name"),
)
park1 = ParkingLot.objects.create(
name="Main St", address="111 Main St", main_site=s1
)
park2 = ParkingLot.objects.create(
name="Well Lit", address="124 Sesame St", main_site=ir
)
self.assertEqual(
Restaurant.objects.get(lot__name="Well Lit").name,
"Ristorante Miron"
)
# The update() command can update fields in parent and child classes at
# once (although it executed multiple SQL queries to do so).
rows = Restaurant.objects.filter(
serves_hot_dogs=True, name__contains="D"
).update(
name="Demon Puppies", serves_hot_dogs=False
)
self.assertEqual(rows, 1)
r1 = Restaurant.objects.get(pk=r.pk)
self.assertFalse(r1.serves_hot_dogs)
self.assertEqual(r1.name, "Demon Puppies")
# The values() command also works on fields from parent models.
self.assertQuerysetEqual(
ItalianRestaurant.objects.values("name", "rating"), [
{"rating": 4, "name": "Ristorante Miron"}
],
lambda o: o
)
# select_related works with fields from the parent object as if they
# were a normal part of the model.
self.assertNumQueries(2,
lambda: ItalianRestaurant.objects.all()[0].chef
)
self.assertNumQueries(1,
lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
)
def test_select_related_defer(self):
"""
#23370 - Should be able to defer child fields when using
select_related() from parent to child.
"""
Restaurant.objects.create(
name="Demon Dogs",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
rating=2,
)
ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
)
qs = (Restaurant.objects
.select_related("italianrestaurant")
.defer("italianrestaurant__serves_gnocchi")
.order_by("rating"))
        # Test that the field was actually deferred
with self.assertNumQueries(2):
objs = list(qs.all())
self.assertTrue(objs[1].italianrestaurant.serves_gnocchi)
        # Test that model fields were assigned correct values
self.assertEqual(qs[0].name, 'Demon Dogs')
self.assertEqual(qs[0].rating, 2)
self.assertEqual(qs[1].italianrestaurant.name, 'Ristorante Miron')
self.assertEqual(qs[1].italianrestaurant.rating, 4)
def test_mixin_init(self):
m = MixinModel()
self.assertEqual(m.other_attr, 1)
def test_update_query_counts(self):
"""
Test that update queries do not generate non-necessary queries.
Refs #18304.
"""
c = Chef.objects.create(name="Albert")
ir = ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
chef=c
)
with self.assertNumQueries(3):
ir.save()
def test_update_parent_filtering(self):
"""
Test that updating a field of a model subclass doesn't issue an UPDATE
query constrained by an inner query.
Refs #10399
"""
supplier = Supplier.objects.create(
name='Central market',
address='610 some street'
)
# Capture the expected query in a database agnostic way
with CaptureQueriesContext(connection) as captured_queries:
Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
expected_sql = captured_queries[0]['sql']
# Capture the queries executed when a subclassed model instance is saved.
with CaptureQueriesContext(connection) as captured_queries:
supplier.save(update_fields=('name',))
for query in captured_queries:
sql = query['sql']
if 'UPDATE' in sql:
self.assertEqual(expected_sql, sql)
|
apache-2.0
|
CVL-dev/cvl-fabric-launcher
|
pyinstaller-2.1/PyInstaller/bindepend.py
|
9
|
27811
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Find external dependencies of binary libraries.
"""
import os
import sys
import re
from glob import glob
# Required for extracting eggs.
import zipfile
from PyInstaller.compat import is_win, is_unix, is_aix, is_cygwin, is_darwin, is_py26, is_py27
from PyInstaller.depend import dylib
from PyInstaller.utils import winutils
import PyInstaller.compat as compat
import PyInstaller.log as logging
logger = logging.getLogger(__file__)
seen = {}
if is_win:
if is_py26:
try:
# For Portable Python it is required to import pywintypes before
# win32api module. See for details:
# http://www.voidspace.org.uk/python/movpy/reference/win32ext.html#problems-with-win32api
import pywintypes
import win32api
except ImportError:
raise SystemExit("Error: PyInstaller for Python 2.6+ on Windows "
"needs pywin32.\r\nPlease install from "
"http://sourceforge.net/projects/pywin32/")
from PyInstaller.utils.winmanifest import RT_MANIFEST
from PyInstaller.utils.winmanifest import GetManifestResources
from PyInstaller.utils.winmanifest import Manifest
try:
from PyInstaller.utils.winmanifest import winresource
except ImportError, detail:
winresource = None
def getfullnameof(mod, xtrapath=None):
"""
Return the full path name of MOD.
MOD is the basename of a dll or pyd.
XTRAPATH is a path or list of paths to search first.
Return the full path name of MOD.
Will search the full Windows search path, as well as sys.path
"""
    # TODO: Allow import hooks to specify additional paths where PyInstaller
    # should look for other libraries.
    # SciPy/Numpy Windows builds from http://www.lfd.uci.edu/~gohlke/pythonlibs
    # contain some dlls in directories like C:\Python27\Lib\site-packages\numpy\core\
from distutils.sysconfig import get_python_lib
numpy_core_paths = [os.path.join(get_python_lib(), 'numpy', 'core')]
    # In a virtualenv, numpy might be installed directly in the real prefix
    # path, so include that path too.
if hasattr(sys, 'real_prefix'):
numpy_core_paths.append(
os.path.join(sys.real_prefix, 'Lib', 'site-packages', 'numpy', 'core')
)
# Search sys.path first!
epath = sys.path + numpy_core_paths + winutils.get_system_path()
if xtrapath is not None:
        if isinstance(xtrapath, str):
epath.insert(0, xtrapath)
else:
epath = xtrapath + epath
for p in epath:
npth = os.path.join(p, mod)
if os.path.exists(npth):
return npth
# second try: lower case filename
for p in epath:
npth = os.path.join(p, mod.lower())
if os.path.exists(npth):
return npth
return ''
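# Hedged usage sketch (not part of the original module): getfullnameof()
# resolves a bare dll basename against sys.path, the numpy core paths and the
# Windows system path, and returns '' when nothing matches. The dll name
# below is illustrative.
def _demo_getfullnameof():
    path = getfullnameof('python27.dll')
    if path:
        logger.debug("Resolved python27.dll to %s", path)
    return path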
def _getImports_pe(pth):
"""
Find the binary dependencies of PTH.
    This implementation walks the PE header using the pefile library
    and supports 32- and 64-bit Windows.
"""
import PyInstaller.lib.pefile as pefile
dlls = set()
# By default library pefile parses all PE information.
# We are only interested in the list of dependent dlls.
# Performance is improved by reading only needed information.
# https://code.google.com/p/pefile/wiki/UsageExamples
pe = pefile.PE(pth, fast_load=True)
pe.parse_data_directories(directories=[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']])
# Some libraries have no other binary dependencies. Use empty list
# in that case. Otherwise pefile would return None.
# e.g. C:\windows\system32\kernel32.dll on Wine
for entry in getattr(pe, 'DIRECTORY_ENTRY_IMPORT', []):
dlls.add(entry.dll)
return dlls
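# Hedged sketch (not part of the original module): listing the import table
# of the running interpreter. Only meaningful on Windows, where sys.executable
# is a PE binary.
def _demo_pe_imports():
    for dll in _getImports_pe(sys.executable):
        logger.debug("%s imports %s", sys.executable, dll)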
def _extract_from_egg(toc):
"""
Ensure all binary modules in zipped eggs get extracted and
included with the frozen executable.
    Return the modified table of contents.
"""
new_toc = []
for item in toc:
        # Item is a tuple
# (mod_name, path, type)
modname, pth, typ = item
if not os.path.isfile(pth):
pth = check_extract_from_egg(pth)[0][0]
# Add value to new data structure.
new_toc.append((modname, pth, typ))
return new_toc
def Dependencies(lTOC, xtrapath=None, manifest=None):
"""
    Expand LTOC to include the full closure of binary dependencies.
    LTOC is a logical table of contents, i.e., a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
manifest should be a winmanifest.Manifest instance on Windows, so
that all dependent assemblies can be added
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
for nm, pth, typ in lTOC:
if seen.get(nm.upper(), 0):
continue
logger.debug("Analyzing %s", pth)
seen[nm.upper()] = 1
if is_win:
for ftocnm, fn in selectAssemblies(pth, manifest):
lTOC.append((ftocnm, fn, 'BINARY'))
for lib, npth in selectImports(pth, xtrapath):
if seen.get(lib.upper(), 0) or seen.get(npth.upper(), 0):
continue
seen[npth.upper()] = 1
lTOC.append((lib, npth, 'BINARY'))
return lTOC
def pkg_resources_get_default_cache():
"""
Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a 'Python-Eggs' subdirectory of the
'Application Data' directory. On all other systems, it's '~/.python-eggs'.
"""
# This function borrowed from setuptools/pkg_resources
egg_cache = compat.getenv('PYTHON_EGG_CACHE')
if egg_cache is not None:
return egg_cache
if os.name != 'nt':
return os.path.expanduser('~/.python-eggs')
app_data = 'Application Data' # XXX this may be locale-specific!
app_homes = [
(('APPDATA',), None), # best option, should be locale-safe
(('USERPROFILE',), app_data),
(('HOMEDRIVE', 'HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
(('WINDIR',), app_data), # 95/98/ME
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, compat.getenv(key))
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
)
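# Hedged sketch (not part of the original module): setting PYTHON_EGG_CACHE
# short-circuits the platform-specific lookup above. The path is illustrative.
def _demo_egg_cache_override():
    os.environ['PYTHON_EGG_CACHE'] = '/tmp/egg-cache'
    return pkg_resources_get_default_cache()  # -> '/tmp/egg-cache'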
def check_extract_from_egg(pth, todir=None):
r"""
Check if path points to a file inside a python egg file, extract the
file from the egg to a cache directory (following pkg_resources
convention) and return [(extracted path, egg file path, relative path
inside egg file)].
Otherwise, just return [(original path, None, None)].
If path points to an egg file directly, return a list with all files
from the egg formatted like above.
Example:
>>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd')
[(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd',
r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')]
"""
rv = []
if os.path.altsep:
pth = pth.replace(os.path.altsep, os.path.sep)
components = pth.split(os.path.sep)
for i, name in enumerate(components):
if name.lower().endswith(".egg"):
eggpth = os.path.sep.join(components[:i + 1])
if os.path.isfile(eggpth):
# eggs can also be directories!
try:
egg = zipfile.ZipFile(eggpth)
except zipfile.BadZipfile, e:
raise SystemExit("Error: %s %s" % (eggpth, e))
if todir is None:
# Use the same directory as setuptools/pkg_resources. So,
# if the specific egg was accessed before (not necessarily
# by pyinstaller), the extracted contents already exist
# (pkg_resources puts them there) and can be used.
                todir = os.path.join(pkg_resources_get_default_cache(),
name + "-tmp")
if components[i + 1:]:
members = ["/".join(components[i + 1:])]
else:
members = egg.namelist()
for member in members:
pth = os.path.join(todir, member)
if not os.path.isfile(pth):
dirname = os.path.dirname(pth)
if not os.path.isdir(dirname):
os.makedirs(dirname)
f = open(pth, "wb")
f.write(egg.read(member))
f.close()
rv.append((pth, eggpth, member))
return rv
return [(pth, None, None)]
def getAssemblies(pth):
"""
Return the dependent assemblies of a binary.
"""
if pth.lower().endswith(".manifest"):
return []
# check for manifest file
manifestnm = pth + ".manifest"
if os.path.isfile(manifestnm):
fd = open(manifestnm, "rb")
res = {RT_MANIFEST: {1: {0: fd.read()}}}
fd.close()
elif not winresource:
# resource access unavailable (needs pywin32)
return []
else:
# check the binary for embedded manifest
try:
res = GetManifestResources(pth)
except winresource.pywintypes.error, exc:
if exc.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
logger.info('Cannot get manifest resource from non-PE '
'file %s', pth)
return []
raise
rv = []
if RT_MANIFEST in res and len(res[RT_MANIFEST]):
for name in res[RT_MANIFEST]:
for language in res[RT_MANIFEST][name]:
# check the manifest for dependent assemblies
try:
manifest = Manifest()
manifest.filename = ":".join([pth, str(RT_MANIFEST),
str(name), str(language)])
manifest.parse_string(res[RT_MANIFEST][name][language],
False)
except Exception, exc:
logger.error("Can not parse manifest resource %s, %s"
"from %s", name, language, pth)
logger.exception(exc)
else:
if manifest.dependentAssemblies:
logger.debug("Dependent assemblies of %s:", pth)
logger.debug(", ".join([assembly.getid()
for assembly in
manifest.dependentAssemblies]))
rv.extend(manifest.dependentAssemblies)
return rv
def selectAssemblies(pth, manifest=None):
"""
    Return a binary's dependent assembly files that should be included.
Return a list of pairs (name, fullpath)
"""
rv = []
if manifest:
_depNames = set([dep.name for dep in manifest.dependentAssemblies])
for assembly in getAssemblies(pth):
if seen.get(assembly.getid().upper(), 0):
continue
        if manifest and assembly.name not in _depNames:
# Add assembly as dependency to our final output exe's manifest
logger.info("Adding %s to dependent assemblies "
"of final executable", assembly.name)
manifest.dependentAssemblies.append(assembly)
_depNames.add(assembly.name)
if not dylib.include_library(assembly.name):
logger.debug("Skipping assembly %s", assembly.getid())
continue
if assembly.optional:
logger.debug("Skipping optional assembly %s", assembly.getid())
continue
files = assembly.find_files()
if files:
seen[assembly.getid().upper()] = 1
for fn in files:
fname, fext = os.path.splitext(fn)
if fext.lower() == ".manifest":
nm = assembly.name + fext
else:
nm = os.path.basename(fn)
ftocnm = nm
if assembly.language not in (None, "", "*", "neutral"):
ftocnm = os.path.join(assembly.getlanguage(),
ftocnm)
nm, ftocnm, fn = [item.encode(sys.getfilesystemencoding())
for item in
(nm,
ftocnm,
fn)]
if not seen.get(fn.upper(), 0):
logger.debug("Adding %s", ftocnm)
seen[nm.upper()] = 1
seen[fn.upper()] = 1
rv.append((ftocnm, fn))
else:
#logger.info("skipping %s part of assembly %s dependency of %s",
# ftocnm, assembly.name, pth)
pass
else:
logger.error("Assembly %s not found", assembly.getid())
return rv
def selectImports(pth, xtrapath=None):
"""
Return the dependencies of a binary that should be included.
Return a list of pairs (name, fullpath)
"""
rv = []
if xtrapath is None:
xtrapath = [os.path.dirname(pth)]
else:
assert isinstance(xtrapath, list)
xtrapath = [os.path.dirname(pth)] + xtrapath # make a copy
dlls = getImports(pth)
for lib in dlls:
if seen.get(lib.upper(), 0):
continue
if not is_win and not is_cygwin:
# all other platforms
npth = lib
lib = os.path.basename(lib)
else:
# plain win case
npth = getfullnameof(lib, xtrapath)
# now npth is a candidate lib if found
        # Check again for excludes, but with a regex. FIXME: split the list.
if npth:
candidatelib = npth
else:
candidatelib = lib
if not dylib.include_library(candidatelib):
if (candidatelib.find('libpython') < 0 and
candidatelib.find('Python.framework') < 0):
# skip libs not containing (libpython or Python.framework)
if not seen.get(npth.upper(), 0):
logger.debug("Skipping %s dependency of %s",
lib, os.path.basename(pth))
continue
else:
pass
if npth:
if not seen.get(npth.upper(), 0):
logger.debug("Adding %s dependency of %s",
lib, os.path.basename(pth))
rv.append((lib, npth))
else:
logger.warning("lib not found: %s dependency of %s", lib, pth)
return rv
def _getImports_ldd(pth):
"""
Find the binary dependencies of PTH.
This implementation is for ldd platforms (mostly unix).
"""
rslt = set()
if is_aix:
# Match libs of the form 'archive.a(sharedobject.so)'
# Will not match the fake lib '/unix'
lddPattern = re.compile(r"\s*(.*?)(\(.*\))")
else:
lddPattern = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)")
for line in compat.exec_command('ldd', pth).splitlines():
m = lddPattern.search(line)
if m:
if is_aix:
lib = m.group(1)
name = os.path.basename(lib) + m.group(2)
else:
name, lib = m.group(1), m.group(2)
if name[:10] in ('linux-gate', 'linux-vdso'):
# linux-gate is a fake library which does not exist and
# should be ignored. See also:
# http://www.trilithium.com/johan/2005/08/linux-gate/
continue
if os.path.exists(lib):
# Add lib if it is not already found.
if lib not in rslt:
rslt.add(lib)
else:
logger.error('Can not find %s in path %s (needed by %s)',
name, lib, pth)
return rslt
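# Hedged sketch (not part of the original module): the non-AIX lddPattern is
# built for glibc-style ldd output; the sample line below is illustrative.
def _demo_ldd_pattern():
    sample = "\tlibz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f00)"
    m = re.compile(r"\s*(.*?)\s+=>\s+(.*?)\s+\(.*\)").search(sample)
    assert m.group(1) == 'libz.so.1'  # soname reported by ldd
    assert m.group(2) == '/lib/x86_64-linux-gnu/libz.so.1'  # resolved path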
def _getImports_macholib(pth):
"""
Find the binary dependencies of PTH.
This implementation is for Mac OS X and uses library macholib.
"""
from PyInstaller.lib.macholib.MachO import MachO
from PyInstaller.lib.macholib.mach_o import LC_RPATH
from PyInstaller.lib.macholib.dyld import dyld_find
rslt = set()
seen = set() # Libraries read from binary headers.
## Walk through mach binary headers.
m = MachO(pth)
for header in m.headers:
for idx, name, lib in header.walkRelocatables():
# Sometimes some libraries are present multiple times.
if lib not in seen:
seen.add(lib)
# Walk through mach binary headers and look for LC_RPATH.
# macholib can't handle @rpath. LC_RPATH has to be read
# from the MachO header.
# TODO Do we need to remove LC_RPATH from MachO load commands?
# Will it cause any harm to leave them untouched?
# Removing LC_RPATH should be implemented when getting
# files from the bincache if it is necessary.
run_paths = set()
for header in m.headers:
for command in header.commands:
            # A command is a tuple like:
# (<macholib.mach_o.load_command object at 0x>,
# <macholib.mach_o.rpath_command object at 0x>,
# '../lib\x00\x00')
cmd_type = command[0].cmd
if cmd_type == LC_RPATH:
rpath = command[2]
# Remove trailing '\x00' characters.
# e.g. '../lib\x00\x00'
rpath = rpath.rstrip('\x00')
# Make rpath absolute. According to Apple doc LC_RPATH
# is always relative to the binary location.
rpath = os.path.normpath(os.path.join(os.path.dirname(pth), rpath))
run_paths.update([rpath])
## Try to find files in file system.
# In cases with @loader_path or @executable_path
# try to look in the same directory as the checked binary is.
# This seems to work in most cases.
exec_path = os.path.abspath(os.path.dirname(pth))
for lib in seen:
# Suppose that @rpath is not used for system libraries and
# using macholib can be avoided.
# macholib can't handle @rpath.
if lib.startswith('@rpath'):
lib = lib.replace('@rpath', '.') # Make path relative.
final_lib = None # Absolute path to existing lib on disk.
# Try multiple locations.
for run_path in run_paths:
# @rpath may contain relative value. Use exec_path as
# base path.
if not os.path.isabs(run_path):
run_path = os.path.join(exec_path, run_path)
# Stop looking for lib when found in first location.
if os.path.exists(os.path.join(run_path, lib)):
final_lib = os.path.abspath(os.path.join(run_path, lib))
rslt.add(final_lib)
break
# Log error if no existing file found.
if not final_lib:
logger.error('Can not find path %s (needed by %s)', lib, pth)
# Macholib has to be used to get absolute path to libraries.
else:
# macholib can't handle @loader_path. It has to be
# handled the same way as @executable_path.
# It is also replaced by 'exec_path'.
if lib.startswith('@loader_path'):
lib = lib.replace('@loader_path', '@executable_path')
try:
lib = dyld_find(lib, executable_path=exec_path)
rslt.add(lib)
except ValueError:
logger.error('Can not find path %s (needed by %s)', lib, pth)
return rslt
def getImports(pth):
"""
Forwards to the correct getImports implementation for the platform.
"""
if is_win or is_cygwin:
if pth.lower().endswith(".manifest"):
return []
try:
return _getImports_pe(pth)
except Exception, exception:
# Assemblies can pull in files which aren't necessarily PE,
# but are still needed by the assembly. Any additional binary
# dependencies should already have been handled by
# selectAssemblies in that case, so just warn, return an empty
# list and continue.
if logger.isEnabledFor(logging.WARN):
                # Log the exception only if level >= WARN.
logger.warn('Can not get binary dependencies for file: %s', pth)
logger.exception(exception)
return []
elif is_darwin:
return _getImports_macholib(pth)
else:
return _getImports_ldd(pth)
def findLibrary(name):
"""
Look for a library in the system.
Emulate the algorithm used by dlopen.
    `name` must include the prefix, e.g. ``libpython2.4.so``
"""
assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)"
lib = None
# Look in the LD_LIBRARY_PATH according to platform.
if is_aix:
lp = compat.getenv('LIBPATH', '')
elif is_darwin:
lp = compat.getenv('DYLD_LIBRARY_PATH', '')
else:
lp = compat.getenv('LD_LIBRARY_PATH', '')
for path in lp.split(os.pathsep):
libs = glob(os.path.join(path, name + '*'))
if libs:
lib = libs[0]
break
# Look in /etc/ld.so.cache
# TODO Look for ldconfig in /usr/sbin/ldconfig. /sbin is deprecated
# in recent linux distributions.
# Solaris does not have /sbin/ldconfig. Just check if this file exists.
if lib is None and os.path.exists('/sbin/ldconfig'):
expr = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name)
m = re.search(expr, compat.exec_command('/sbin/ldconfig', '-p'))
if m:
lib = m.group(0)
# Look in the known safe paths
if lib is None:
paths = ['/lib', '/lib32', '/lib64', '/usr/lib', '/usr/lib32', '/usr/lib64']
# On Debian/Ubuntu /usr/bin/python is linked statically with libpython.
        # Newer Debian/Ubuntu with multiarch support puts the libpythonX.Y.so
        # in paths like /usr/lib/i386-linux-gnu/.
try:
import sysconfig # Module available only in Python 2.7.
arch_subdir = sysconfig.get_config_var('multiarchsubdir')
# Ignore if None is returned.
if arch_subdir:
arch_subdir = os.path.basename(arch_subdir)
paths.extend([
os.path.join('/usr/lib', arch_subdir),
os.path.join('/usr/lib32', arch_subdir),
os.path.join('/usr/lib64', arch_subdir),
])
except ImportError:
pass
if is_aix:
paths.append('/opt/freeware/lib')
for path in paths:
libs = glob(os.path.join(path, name + '*'))
if libs:
lib = libs[0]
break
# give up :(
if lib is None:
return None
# Resolve the file name into the soname
dir = os.path.dirname(lib)
return os.path.join(dir, getSoname(lib))
def getSoname(filename):
"""
Return the soname of a library.
"""
cmd = ["objdump", "-p", "-j", ".dynamic", filename]
m = re.search(r'\s+SONAME\s+([^\s]+)', compat.exec_command(*cmd))
if m:
return m.group(1)
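# Hedged sketch (not part of the original module): the SONAME regex applied to
# a typical 'objdump -p -j .dynamic' output line (sample is illustrative).
def _demo_soname_pattern():
    sample = "  SONAME               libpython2.7.so.1.0"
    m = re.search(r'\s+SONAME\s+([^\s]+)', sample)
    assert m.group(1) == 'libpython2.7.so.1.0'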
def get_python_library_path():
"""
    Find the dynamic Python library that will be bundled with the frozen executable.
    Return the full path to the Python dynamic library, or None when not found.
    We need to know the name of the Python dynamic library for the bootloader.
    The bootloader has to know what library to load rather than guessing.
    Some Linux distributions (e.g. Debian-based) statically link the
    Python executable against libpython, so bindepend doesn't include
    it in its output. In this situation let's try to find it.
    Darwin custom builds could possibly also have non-framework style libraries,
    so this method checks for that variant as well.
"""
pyver = sys.version_info[:2]
if is_win:
names = ('python%d%d.dll' % pyver,)
elif is_cygwin:
names = ('libpython%d%d.dll' % pyver,)
elif is_darwin:
names = ('Python', '.Python', 'libpython%d.%d.dylib' % pyver)
elif is_aix:
# Shared libs on AIX are archives with shared object members, thus the ".a" suffix.
names = ('libpython%d.%d.a' % pyver,)
elif is_unix:
# Other *nix platforms.
names = ('libpython%d.%d.so.1.0' % pyver,)
else:
raise SystemExit('Your platform is not yet supported.')
    # Try to get the Python library name from the Python executable. This
    # assumes that the Python library is not statically linked.
dlls = getImports(sys.executable)
for filename in dlls:
for name in names:
if os.path.basename(filename) == name:
# On Windows filename is just like 'python27.dll'. Convert it
# to absolute path.
if is_win and not os.path.isabs(filename):
filename = getfullnameof(filename)
# Python library found. Return absolute path to it.
return filename
# Python library NOT found. Resume searching using alternative methods.
    # Applies only to non-Windows platforms.
if is_unix:
for name in names:
python_libname = findLibrary(name)
if python_libname:
return python_libname
elif is_darwin:
# On MacPython, Analysis.assemble is able to find the libpython with
# no additional help, asking for sys.executable dependencies.
# However, this fails on system python, because the shared library
# is not listed as a dependency of the binary (most probably it's
# opened at runtime using some dlopen trickery).
# This happens on Mac OS X when Python is compiled as Framework.
# Python compiled as Framework contains same values in sys.prefix
# and exec_prefix. That's why we can use just sys.prefix.
# In virtualenv PyInstaller is not able to find Python library.
# We need special care for this case.
if compat.is_virtualenv:
py_prefix = compat.venv_real_prefix
else:
py_prefix = sys.prefix
for name in names:
full_path = os.path.join(py_prefix, name)
if os.path.exists(full_path):
return full_path
# Python library NOT found. Return just None.
return None
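# Hedged sketch (not part of the original module): typical use of the lookup
# above; None means the interpreter is most likely statically linked.
def _demo_python_library_lookup():
    libpython = get_python_library_path()
    if libpython is None:
        logger.warn('Python library not found')
    return libpython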
|
gpl-3.0
|
bdupharm/sqlalchemy
|
test/orm/test_deferred.py
|
2
|
29273
|
import sqlalchemy as sa
from sqlalchemy import testing, util
from sqlalchemy.orm import mapper, deferred, defer, undefer, Load, \
load_only, undefer_group, create_session, synonym, relationship, Session,\
joinedload, defaultload, aliased, contains_eager, with_polymorphic
from sqlalchemy.testing import eq_, AssertsCompiledSQL, assert_raises_message
from test.orm import _fixtures
from .inheritance._poly_fixtures import Company, Person, Engineer, Manager, \
Boss, Machine, Paperwork, _Polymorphic
class DeferredTest(AssertsCompiledSQL, _fixtures.FixtureTest):
def test_basic(self):
"""A basic deferred load."""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
o = Order()
self.assert_(o.description is None)
q = create_session().query(Order).order_by(Order.id)
def go():
l = q.all()
o2 = l[2]
x = o2.description
self.sql_eq_(go, [
("SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.description AS orders_description "
"FROM orders WHERE orders.id = :param_1",
{'param_1':3})])
def test_defer_primary_key(self):
"""what happens when we try to defer the primary key?"""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'id': deferred(orders.c.id)})
# right now, it's not that graceful :)
q = create_session().query(Order)
assert_raises_message(
sa.exc.NoSuchColumnError,
"Could not locate",
q.first
)
def test_unsaved(self):
"""Deferred loading does not kick in when just PK cols are set."""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o = Order()
sess.add(o)
o.id = 7
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_synonym_group_bug(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'isopen':synonym('_isopen', map_column=True),
'description':deferred(orders.c.description, group='foo')
})
sess = create_session()
o1 = sess.query(Order).get(1)
eq_(o1.description, "order 1")
def test_unsaved_2(self):
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o = Order()
sess.add(o)
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_unsaved_group(self):
"""Deferred loading doesn't kick in when just PK cols are set"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=dict(
description=deferred(orders.c.description, group='primary'),
opened=deferred(orders.c.isopen, group='primary')))
sess = create_session()
o = Order()
sess.add(o)
o.id = 7
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_unsaved_group_2(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=dict(
description=deferred(orders.c.description, group='primary'),
opened=deferred(orders.c.isopen, group='primary')))
sess = create_session()
o = Order()
sess.add(o)
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_save(self):
Order, orders = self.classes.Order, self.tables.orders
m = mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o2 = sess.query(Order).get(2)
o2.isopen = 1
sess.flush()
def test_group(self):
"""Deferred load with a group"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('addrident', deferred(orders.c.address_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='primary'))
]))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.id AS orders_id "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders WHERE orders.id = :param_1",
{'param_1':3})])
o2 = q.all()[2]
eq_(o2.description, 'order 3')
assert o2 not in sess.dirty
o2.description = 'order 3'
def go():
sess.flush()
self.sql_count_(0, go)
def test_preserve_changes(self):
"""A deferred load operation doesn't revert modifications on attributes"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties = {
'userident': deferred(orders.c.user_id, group='primary'),
'description': deferred(orders.c.description, group='primary'),
'opened': deferred(orders.c.isopen, group='primary')
})
sess = create_session()
o = sess.query(Order).get(3)
assert 'userident' not in o.__dict__
o.description = 'somenewdescription'
eq_(o.description, 'somenewdescription')
def go():
eq_(o.opened, 1)
self.assert_sql_count(testing.db, go, 1)
eq_(o.description, 'somenewdescription')
assert o in sess.dirty
def test_commits_state(self):
"""
When deferred elements are loaded via a group, they get the proper
CommittedState and don't result in changes being committed
"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties = {
'userident': deferred(orders.c.user_id, group='primary'),
'description': deferred(orders.c.description, group='primary'),
'opened': deferred(orders.c.isopen, group='primary')})
sess = create_session()
o2 = sess.query(Order).get(3)
# this will load the group of attributes
eq_(o2.description, 'order 3')
assert o2 not in sess.dirty
# this will mark it as 'dirty', but nothing actually changed
o2.description = 'order 3'
# therefore the flush() shouldn't actually issue any SQL
self.assert_sql_count(testing.db, sess.flush, 0)
def test_map_selectable_wo_deferred(self):
"""test mapping to a selectable with deferred cols,
the selectable doesn't include the deferred col.
"""
Order, orders = self.classes.Order, self.tables.orders
order_select = sa.select([
orders.c.id,
orders.c.user_id,
orders.c.address_id,
orders.c.description,
orders.c.isopen]).alias()
mapper(Order, order_select, properties={
'description':deferred(order_select.c.description)
})
sess = Session()
o1 = sess.query(Order).order_by(Order.id).first()
assert 'description' not in o1.__dict__
eq_(o1.description, 'order 1')
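# Hedged sketch (not part of the test suite): the bare mapping pattern the
# tests above exercise, shown standalone. The table and class arguments are
# assumptions supplied by the caller.
def _demo_deferred_mapping(orders_table, Order):
    # Map Order with a deferred 'description'; touching the attribute later
    # emits a second SELECT for just that column, unless it is undeferred
    # up front as below.
    mapper(Order, orders_table, properties={
        'description': deferred(orders_table.c.description)})
    sess = create_session()
    return sess.query(Order).options(undefer('description'))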
class DeferredOptionsTest(AssertsCompiledSQL, _fixtures.FixtureTest):
__dialect__ = 'default'
def test_options(self):
"""Options on a mapper to create deferred and undeferred columns"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
q = sess.query(Order).order_by(Order.id).options(defer('user_id'))
def go():
q.all()[0].user_id
self.sql_eq_(go, [
("SELECT orders.id AS orders_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.user_id AS orders_user_id "
"FROM orders WHERE orders.id = :param_1",
{'param_1':1})])
sess.expunge_all()
q2 = q.options(undefer('user_id'))
self.sql_eq_(q2.all, [
("SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{})])
def test_undefer_group(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='primary'))
]
))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.options(undefer_group('primary')).all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id "
"FROM orders ORDER BY orders.id",
{})])
def test_undefer_group_multi(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='secondary'))
]
))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.options(
undefer_group('primary'), undefer_group('secondary')).all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id "
"FROM orders ORDER BY orders.id",
{})])
def test_undefer_group_multi_pathed(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='secondary'))
]
))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.options(
Load(Order).undefer_group('primary').undefer_group('secondary')).all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id "
"FROM orders ORDER BY orders.id",
{})])
def test_undefer_star(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id)),
('description', deferred(orders.c.description)),
('opened', deferred(orders.c.isopen))
]
))
sess = create_session()
q = sess.query(Order).options(Load(Order).undefer('*'))
self.assert_compile(q,
"SELECT orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id FROM orders"
)
def test_locates_col(self):
"""changed in 1.0 - we don't search for deferred cols in the result
now. """
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o1 = (sess.query(Order).
order_by(Order.id).
add_column(orders.c.description).first())[0]
def go():
eq_(o1.description, 'order 1')
# prior to 1.0 we'd search in the result for this column
# self.sql_count_(0, go)
self.sql_count_(1, go)
def test_locates_col_rowproc_only(self):
"""changed in 1.0 - we don't search for deferred cols in the result
now.
Because the loading for ORM Query and Query from a core select
is now split off, we test loading from a plain select()
separately.
"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
stmt = sa.select([Order]).order_by(Order.id)
o1 = (sess.query(Order).
from_statement(stmt).all())[0]
def go():
eq_(o1.description, 'order 1')
# prior to 1.0 we'd search in the result for this column
# self.sql_count_(0, go)
self.sql_count_(1, go)
def test_deep_options(self):
users, items, order_items, Order, Item, User, orders = (self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders)
mapper(Item, items, properties=dict(
description=deferred(items.c.description)))
mapper(Order, orders, properties=dict(
items=relationship(Item, secondary=order_items)))
mapper(User, users, properties=dict(
orders=relationship(Order, order_by=orders.c.id)))
sess = create_session()
q = sess.query(User).order_by(User.id)
l = q.all()
item = l[0].orders[1].items[1]
def go():
eq_(item.description, 'item 4')
self.sql_count_(1, go)
eq_(item.description, 'item 4')
sess.expunge_all()
l = q.options(undefer('orders.items.description')).all()
item = l[0].orders[1].items[1]
def go():
eq_(item.description, 'item 4')
self.sql_count_(0, go)
eq_(item.description, 'item 4')
def test_path_entity(self):
"""test the legacy *addl_attrs argument."""
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
users = self.tables.users
orders = self.tables.orders
items = self.tables.items
order_items = self.tables.order_items
mapper(User, users, properties={
"orders": relationship(Order, lazy="joined")
})
mapper(Order, orders, properties={
"items": relationship(Item, secondary=order_items, lazy="joined")
})
mapper(Item, items)
sess = create_session()
exp = ("SELECT users.id AS users_id, users.name AS users_name, "
"items_1.id AS items_1_id, orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, orders_1.address_id "
"AS orders_1_address_id, orders_1.description AS "
"orders_1_description, orders_1.isopen AS orders_1_isopen "
"FROM users LEFT OUTER JOIN orders AS orders_1 "
"ON users.id = orders_1.user_id LEFT OUTER JOIN "
"(order_items AS order_items_1 JOIN items AS items_1 "
"ON items_1.id = order_items_1.item_id) "
"ON orders_1.id = order_items_1.order_id")
q = sess.query(User).options(defer(User.orders, Order.items, Item.description))
self.assert_compile(q, exp)
def test_chained_multi_col_options(self):
users, User = self.tables.users, self.classes.User
orders, Order = self.tables.orders, self.classes.Order
mapper(User, users, properties={
"orders": relationship(Order)
})
mapper(Order, orders)
sess = create_session()
q = sess.query(User).options(
joinedload(User.orders).defer("description").defer("isopen")
)
self.assert_compile(q,
"SELECT users.id AS users_id, users.name AS users_name, "
"orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, "
"orders_1.address_id AS orders_1_address_id FROM users "
"LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id"
)
def test_load_only_no_pk(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
q = sess.query(Order).options(load_only("isopen", "description"))
self.assert_compile(q,
"SELECT orders.id AS orders_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen FROM orders")
def test_load_only_no_pk_rt(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
q = sess.query(Order).order_by(Order.id).\
options(load_only("isopen", "description"))
eq_(q.first(), Order(id=1))
def test_load_only_w_deferred(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
"description": deferred(orders.c.description)
})
sess = create_session()
q = sess.query(Order).options(
load_only("isopen", "description"),
undefer("user_id")
)
self.assert_compile(q,
"SELECT orders.description AS orders_description, "
"orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.isopen AS orders_isopen FROM orders")
def test_load_only_propagate_unbound(self):
self._test_load_only_propagate(False)
def test_load_only_propagate_bound(self):
self._test_load_only_propagate(True)
def _test_load_only_propagate(self, use_load):
User = self.classes.User
Address = self.classes.Address
users = self.tables.users
addresses = self.tables.addresses
mapper(User, users, properties={
"addresses": relationship(Address)
})
mapper(Address, addresses)
sess = create_session()
expected = [
("SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id IN (:id_1, :id_2)", {'id_2': 8, 'id_1': 7}),
("SELECT addresses.id AS addresses_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id", {'param_1': 7}),
("SELECT addresses.id AS addresses_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id", {'param_1': 8}),
]
if use_load:
opt = Load(User).defaultload(User.addresses).load_only("id", "email_address")
else:
opt = defaultload(User.addresses).load_only("id", "email_address")
q = sess.query(User).options(opt).filter(User.id.in_([7, 8]))
def go():
for user in q:
user.addresses
self.sql_eq_(go, expected)
def test_load_only_parent_specific(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
users = self.tables.users
addresses = self.tables.addresses
orders = self.tables.orders
mapper(User, users)
mapper(Address, addresses)
mapper(Order, orders)
sess = create_session()
q = sess.query(User, Order, Address).options(
Load(User).load_only("name"),
Load(Order).load_only("id"),
Load(Address).load_only("id", "email_address")
)
self.assert_compile(q,
"SELECT users.id AS users_id, users.name AS users_name, "
"orders.id AS orders_id, "
"addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM users, orders, addresses"
)
def test_load_only_path_specific(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
users = self.tables.users
addresses = self.tables.addresses
orders = self.tables.orders
mapper(User, users, properties=util.OrderedDict([
("addresses", relationship(Address, lazy="joined")),
("orders", relationship(Order, lazy="joined"))
]))
mapper(Address, addresses)
mapper(Order, orders)
sess = create_session()
q = sess.query(User).options(
load_only("name").defaultload("addresses").load_only("id", "email_address"),
defaultload("orders").load_only("id")
)
# hmmmm joinedload seems to be forcing users.id into here...
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.email_address AS addresses_1_email_address, "
"orders_1.id AS orders_1_id FROM users "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id"
)
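# Hedged sketch (not part of the test suite): load_only() restricts the SELECT
# to the named columns but, as test_load_only_no_pk above shows, the primary
# key is always kept. The session and class arguments are assumptions.
def _demo_load_only(sess, Order):
    return sess.query(Order).options(load_only("isopen", "description"))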
class InheritanceTest(_Polymorphic):
__dialect__ = 'default'
def test_load_only_subclass(self):
s = Session()
q = s.query(Manager).order_by(Manager.person_id).\
options(load_only("status", "manager_name"))
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY managers.person_id"
)
def test_load_only_subclass_and_superclass(self):
s = Session()
q = s.query(Boss).order_by(Person.person_id).\
options(load_only("status", "manager_name"))
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id JOIN boss "
"ON managers.person_id = boss.boss_id ORDER BY people.person_id"
)
def test_load_only_alias_subclass(self):
s = Session()
m1 = aliased(Manager, flat=True)
q = s.query(m1).order_by(m1.person_id).\
options(load_only("status", "manager_name"))
self.assert_compile(
q,
"SELECT managers_1.person_id AS managers_1_person_id, "
"people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name "
"FROM people AS people_1 JOIN managers AS "
"managers_1 ON people_1.person_id = managers_1.person_id "
"ORDER BY managers_1.person_id"
)
def test_load_only_subclass_from_relationship_polymorphic(self):
s = Session()
wp = with_polymorphic(Person, [Manager], flat=True)
q = s.query(Company).join(Company.employees.of_type(wp)).options(
contains_eager(Company.employees.of_type(wp)).
load_only(wp.Manager.status, wp.Manager.manager_name)
)
self.assert_compile(
q,
"SELECT people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name, "
"companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people AS people_1 LEFT OUTER JOIN "
"managers AS managers_1 ON people_1.person_id = "
"managers_1.person_id) ON companies.company_id = "
"people_1.company_id"
)
def test_load_only_subclass_from_relationship(self):
s = Session()
from sqlalchemy import inspect
inspect(Company).add_property("managers", relationship(Manager))
q = s.query(Company).join(Company.managers).options(
contains_eager(Company.managers).
load_only("status", "manager_name")
)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM companies JOIN (people JOIN managers ON people.person_id = "
"managers.person_id) ON companies.company_id = people.company_id"
)
def test_defer_on_wildcard_subclass(self):
# pretty much the same as load_only except doesn't
# exclude the primary key
s = Session()
q = s.query(Manager).order_by(Person.person_id).options(
defer(".*"), undefer("status"))
self.assert_compile(
q,
"SELECT managers.status AS managers_status "
"FROM people JOIN managers ON "
"people.person_id = managers.person_id ORDER BY people.person_id"
)
def test_defer_super_name_on_subclass(self):
s = Session()
q = s.query(Manager).order_by(Person.person_id).options(defer("name"))
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.type AS people_type, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY people.person_id"
)
|
mit
|
Medium/phantomjs-1
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/logtesting.py
|
124
|
9435
|
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports the unit-testing of logging code.
Provides support for unit-testing messages logged using the built-in
logging module.
Inherit from the LoggingTestCase class for basic testing needs. For
more advanced needs (e.g. unit-testing methods that configure logging),
see the TestLogStream class, and perhaps also the LogTesting class.
"""
import logging
import unittest2 as unittest
class TestLogStream(object):
"""Represents a file-like object for unit-testing logging.
This is meant for passing to the logging.StreamHandler constructor.
Log messages captured by instances of this object can be tested
using self.assertMessages() below.
"""
def __init__(self, test_case):
"""Create an instance.
Args:
test_case: A unittest.TestCase instance.
"""
self._test_case = test_case
self.messages = []
"""A list of log messages written to the stream."""
# Python documentation says that any object passed to the StreamHandler
# constructor should support write() and flush():
#
# http://docs.python.org/library/logging.html#module-logging.handlers
def write(self, message):
self.messages.append(message)
def flush(self):
pass
def assertMessages(self, messages):
"""Assert that the given messages match the logged messages.
        Args:
          messages: A list of log message strings.
"""
self._test_case.assertEqual(messages, self.messages)
class LogTesting(object):
"""Supports end-to-end unit-testing of log messages.
Sample usage:
class SampleTest(unittest.TestCase):
def setUp(self):
self._log = LogTesting.setUp(self) # Turn logging on.
def tearDown(self):
self._log.tearDown() # Turn off and reset logging.
def test_logging_in_some_method(self):
call_some_method() # Contains calls to _log.info(), etc.
# Check the resulting log messages.
self._log.assertMessages(["INFO: expected message #1",
"WARNING: expected message #2"])
"""
def __init__(self, test_stream, handler):
"""Create an instance.
This method should never be called directly. Instances should
instead be created using the static setUp() method.
Args:
test_stream: A TestLogStream instance.
handler: The handler added to the logger.
"""
self._test_stream = test_stream
self._handler = handler
@staticmethod
def _getLogger():
"""Return the logger being tested."""
# It is possible we might want to return something other than
# the root logger in some special situation. For now, the
# root logger seems to suffice.
return logging.getLogger()
@staticmethod
def setUp(test_case, logging_level=logging.INFO):
"""Configure logging for unit testing.
Configures the root logger to log to a testing log stream.
Only messages logged at or above the given level are logged
to the stream. Messages logged to the stream are formatted
in the following way, for example--
"INFO: This is a test log message."
This method should normally be called in the setUp() method
of a unittest.TestCase. See the docstring of this class
for more details.
Returns:
A LogTesting instance.
Args:
test_case: A unittest.TestCase instance.
logging_level: An integer logging level that is the minimum level
of log messages you would like to test.
"""
stream = TestLogStream(test_case)
handler = logging.StreamHandler(stream)
handler.setLevel(logging_level)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
# Notice that we only change the root logger by adding a handler
# to it. In particular, we do not reset its level using
# logger.setLevel(). This ensures that we have not interfered
# with how the code being tested may have configured the root
# logger.
logger = LogTesting._getLogger()
logger.addHandler(handler)
return LogTesting(stream, handler)
def tearDown(self):
"""Assert there are no remaining log messages, and reset logging.
This method asserts that there are no more messages in the array of
log messages, and then restores logging to its original state.
This method should normally be called in the tearDown() method of a
unittest.TestCase. See the docstring of this class for more details.
"""
self.assertMessages([])
logger = LogTesting._getLogger()
logger.removeHandler(self._handler)
def messages(self):
"""Return the current list of log messages."""
return self._test_stream.messages
# FIXME: Add a clearMessages() method for cases where the caller
# deliberately doesn't want to assert every message.
# We clear the log messages after asserting since they are no longer
# needed after asserting. This serves two purposes: (1) it simplifies
# the calling code when we want to check multiple logging calls in a
# single test method, and (2) it lets us check in the tearDown() method
# that there are no remaining log messages to be asserted.
#
# The latter ensures that no extra log messages are getting logged that
# the caller might not be aware of or may have forgotten to check for.
# This gets us a bit more mileage out of our tests without writing any
# additional code.
def assertMessages(self, messages):
"""Assert the current array of log messages, and clear its contents.
Args:
messages: A list of log message strings.
"""
try:
self._test_stream.assertMessages(messages)
finally:
# We want to clear the array of messages even in the case of
# an Exception (e.g. an AssertionError). Otherwise, another
# AssertionError can occur in the tearDown() because the
# array might not have gotten emptied.
self._test_stream.messages = []
# This class needs to inherit from unittest.TestCase. Otherwise, the
# setUp() and tearDown() methods will not get fired for test case classes
# that inherit from this class -- even if the class inherits from *both*
# unittest.TestCase and LoggingTestCase.
#
# FIXME: Rename this class to LoggingTestCaseBase to be sure that
# the unittest module does not interpret this class as a unittest
# test case itself.
class LoggingTestCase(unittest.TestCase):
"""Supports end-to-end unit-testing of log messages.
Sample usage:
class SampleTest(LoggingTestCase):
def test_logging_in_some_method(self):
call_some_method() # Contains calls to _log.info(), etc.
# Check the resulting log messages.
self.assertLog(["INFO: expected message #1",
"WARNING: expected message #2"])
"""
def setUp(self):
self._log = LogTesting.setUp(self)
def tearDown(self):
self._log.tearDown()
def logMessages(self):
"""Return the current list of log messages."""
return self._log.messages()
# FIXME: Add a clearMessages() method for cases where the caller
# deliberately doesn't want to assert every message.
# See the code comments preceding LogTesting.assertMessages() for
# an explanation of why we clear the array of messages after
# asserting its contents.
def assertLog(self, messages):
"""Assert the current array of log messages, and clear its contents.
Args:
messages: A list of log message strings.
"""
self._log.assertMessages(messages)
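# Hedged example (not part of the original module): a minimal test case using
# LoggingTestCase. The message is illustrative; note that the root logger's
# default WARNING level still applies, since setUp() only adds a handler.
class _ExampleLoggingTest(LoggingTestCase):
    def test_warning_message(self):
        logging.getLogger().warning("something to check")
        self.assertLog(["WARNING: something to check\n"])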
|
bsd-3-clause
|
bblacey/FreeCAD-MacOS-CI
|
src/Mod/Import/App/SCL/AggregationDataTypes.py
|
29
|
27474
|
# Copyright (c) 2011, Thomas Paviot ([email protected])
# All rights reserved.
# This file is part of the StepClassLibrary (SCL).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SimpleDataTypes import *
from TypeChecker import check_type
import BaseType
class BaseAggregate(object):
""" A class that define common properties to ARRAY, LIST, SET and BAG.
"""
def __init__( self , bound1 , bound2 , base_type ):
        # check that bound1 <= bound2
        if bound1 is not None and bound2 is not None:
if bound1>bound2:
raise AssertionError("bound1 shall be less than or equal to bound2")
self._bound1 = bound1
self._bound2 = bound2
self._base_type = base_type
def __getitem__(self, index):
if index<self._bound1:
raise IndexError("ARRAY index out of bound (lower bound is %i, passed %i)"%(self._bound1,index))
elif(self._bound2!=None and index>self._bound2):
raise IndexError("ARRAY index out of bound (upper bound is %i, passed %i)"%(self._bound2,index))
else:
return list.__getitem__(self,index)
def __setitem__(self,index,value):
if index<self._bound1:
raise IndexError("ARRAY index out of bound (lower bound is %i, passed %i)"%(self._bound1,index))
elif (self._bound2!=None and index>self._bound2):
raise IndexError("ARRAY index out of bound (upper bound is %i, passed %i)"%(self._bound2,index))
elif not isinstance(value,self._base_type):
raise TypeError("%s type expected, passed %s."%(self._base_type, type(value)))
else:
            # first find the length of the list, and extend it if ever
            # the index is beyond the current length
list.__setitem__(self,index,value)
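# Hedged sketch (not part of the original module): BaseAggregate delegates to
# list.__getitem__/__setitem__, so a concrete aggregate also inherits list.
# The demo subclass and values below are illustrative only.
class _DemoAggregate(BaseAggregate, list):
    pass
def _demo_base_aggregate():
    agg = _DemoAggregate(0, 2, INTEGER)
    agg.extend([INTEGER(1), INTEGER(2), INTEGER(3)])
    agg[1] = INTEGER(5)   # bound- and type-checked assignment
    return agg[1]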
class ARRAY(BaseType.Type, BaseType.Aggregate):
"""
EXPRESS definition:
==================
An array data type has as its domain indexed, fixed-size collections of like elements. The lower
and upper bounds, which are integer-valued expressions, define the range of index values, and
thus the size of each array collection.
An array data type definition may optionally specify
that an array value cannot contain duplicate elements.
It may also specify that an array value
need not contain an element at every index position.
Given that m is the lower bound and n is the upper bound, there are exactly n-m+1 elements
in the array. These elements are indexed by subscripts from m to n, inclusive (see 12.6.1).
    NOTE 1: The bounds may be positive, negative or zero, but may not be indeterminate (?) (see
14.2).
Syntax:
165 array_type = ARRAY bound_spec OF [ OPTIONAL ] [ UNIQUE ] base_type .
176 bound_spec = '[' bound_1 ':' bound_2 ']' .
174 bound_1 = numeric_expression .
175 bound_2 = numeric_expression .
171 base_type = aggregation_types | simple_types | named_types .
Rules and restrictions:
a) Both expressions in the bound specification, bound_1 and bound_2, shall evaluate to
integer values. Neither shall evaluate to the indeterminate (?) value.
b) bound_1 gives the lower bound of the array. This shall be the lowest index which is
valid for an array value of this data type.
c) bound_2 gives the upper bound of the array. This shall be the highest index which is
valid for an array value of this data type.
d) bound_1 shall be less than or equal to bound_2.
e) If the optional keyword is specified, an array value of this data type may have the
indeterminate (?) value at one or more index positions.
f) If the optional keyword is not specified, an array value of this data type shall not
contain an indeterminate (?) value at any index position.
g) If the unique keyword is specified, each element in an array value of this data type
shall be different from (i.e., not instance equal to) every other element in the same array
value.
    NOTE 2: Both optional and unique may be specified in the same array data type definition.
This does not preclude multiple indeterminate (?) values from occurring in a single array value.
This is because comparisons between indeterminate (?) values result in unknown so the uniqueness
constraint is not violated.
    EXAMPLE 27: This example shows how a multi-dimensioned array is declared.
sectors : ARRAY [ 1 : 10 ] OF -- first dimension
ARRAY [ 11 : 14 ] OF -- second dimension
UNIQUE something;
The first array has 10 elements of data type ARRAY[11:14] OF UNIQUE something. There is
a total of 40 elements of data type something in the attribute named sectors. Within each
ARRAY[11:14], no duplicates may occur; however, the same something instance may occur in two
different ARRAY[11:14] values within a single value for the attribute named sectors.
Python definition:
==================
@TODO
"""
def __init__( self , bound_1 , bound_2 , base_type , UNIQUE = False, OPTIONAL=False, scope = None):
BaseType.Type.__init__(self, base_type, scope)
if not type(bound_1)==int:
raise TypeError("ARRAY lower bound must be an integer")
if not type(bound_2)==int:
raise TypeError("ARRAY upper bound must be an integer")
if not (bound_1 <= bound_2):
raise AssertionError("ARRAY lower bound must be less than or equal to upper bound")
# set up class attributes
self._bound_1 = bound_1
self._bound_2 = bound_2
self._unique = UNIQUE
self._optional = OPTIONAL
# preallocate list elements
list_size = bound_2 - bound_1 + 1
self._container = list_size*[None]
def bound_1(self):
return self._bound_1
def bound_2(self):
return self._bound_2
def get_hiindex(self):
return INTEGER(self._bound_2)
def get_loindex(self):
return INTEGER(self._bound_1)
def get_hibound(self):
return INTEGER(self._bound_2)
def get_lobound(self):
return INTEGER(self._bound_1)
def get_size(self):
return INTEGER(self._bound_2 - self._bound_1 +1)
def get_value_unique(self):
''' Return True if all items are different in the container, UNKNOWN if some items are
indeterminate, or False otherwise'''
if None in self._container:
return Unknown
if self.get_size()-len(set(self._container))>0: #some items are repeated
return False
else:
return True
def __getitem__(self, index):
if index<self._bound_1:
raise IndexError("ARRAY index out of bound (lower bound is %i, passed %i)"%(self._bound_1,index))
elif(index>self._bound_2):
raise IndexError("ARRAY index out of bound (upper bound is %i, passed %i)"%(self._bound_2,index))
else:
value = self._container[index-self._bound_1]
            if not self._optional and value is None:
                raise AssertionError("ARRAY is not OPTIONAL, so the value at index %i may not be indeterminate (None). Please set the value first."%index)
return value
def __setitem__(self, index, value):
if index<self._bound_1:
raise IndexError("ARRAY index out of bound (lower bound is %i, passed %i)"%(self._bound_1,index))
elif(index>self._bound_2):
raise IndexError("ARRAY index out of bound (upper bound is %i, passed %i)"%(self._bound_2,index))
else:
# first check the type of the value
check_type(value,self.get_type())
# then check if the value is already in the array
if self._unique:
if value in self._container:
raise AssertionError("UNIQUE keyword prevents inserting this instance.")
self._container[index-self._bound_1] = value
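# A minimal usage sketch (not part of the original module): it shows how the
# ARRAY maps the EXPRESS index range [bound_1:bound_2] onto a zero-based
# Python list. INTEGER comes from the SimpleDataTypes star-import above; the
# assumption here is that BaseType.Type accepts it as a base type.
def _array_usage_sketch():
    sectors = ARRAY(1, 10, INTEGER)
    # indices 1..10 map to _container slots 0..9; anything outside is rejected
    try:
        sectors[0]
    except IndexError:
        pass  # lower bound is 1, so index 0 is out of bounds
    try:
        sectors[11]
    except IndexError:
        pass  # upper bound is 10, so index 11 is out of bounds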
class LIST(BaseType.Type, BaseType.Aggregate):
"""
EXPRESS definition:
==================
A list data type has as its domain sequences of like elements. The optional lower and upper
bounds, which are integer-valued expressions, define the minimum and maximum number of
elements that can be held in the collection defined by a list data type.
A list data type
definition may optionally specify that a list value cannot contain duplicate elements.
Syntax:
237 list_type = LIST [ bound_spec ] OF [ UNIQUE ] base_type .
176 bound_spec = '[' bound_1 ':' bound_2 ']' .
174 bound_1 = numeric_expression .
175 bound_2 = numeric_expression .
171 base_type = aggregation_types | simple_types | named_types .
Rules and restrictions:
a) The bound_1 expression shall evaluate to an integer value greater than or equal to
zero. It gives the lower bound, which is the minimum number of elements that can be in a
list value of this data type. bound_1 shall not produce the indeterminate (?) value.
b) The bound_2 expression shall evaluate to an integer value greater than or equal to
bound_1, or an indeterminate (?) value. It gives the upper bound, which is the maximum
number of elements that can be in a list value of this data type.
If this value is indeterminate (?) the number of elements in a list value of this data type is
not bounded from above.
c) If the bound_spec is omitted, the limits are [0:?].
d) If the unique keyword is specified, each element in a list value of this data type shall
be different from (i.e., not instance equal to) every other element in the same list value.
    EXAMPLE 28: This example defines a list of arrays. The list can contain zero to ten arrays. Each
array of ten integers shall be different from all other arrays in a particular list.
complex_list : LIST[0:10] OF UNIQUE ARRAY[1:10] OF INTEGER;
Python definition:
==================
@TODO
"""
def __init__( self , bound_1 , bound_2 , base_type , UNIQUE = False, scope = None):
BaseType.Type.__init__(self, base_type, scope)
if not type(bound_1)==int:
raise TypeError("LIST lower bound must be an integer")
# bound_2 can be set to None
self._unbounded = False
if bound_2 == None:
self._unbounded = True
elif not type(bound_2)==int:
raise TypeError("LIST upper bound must be an integer")
        if not bound_1>=0:
            raise AssertionError("LIST lower bound must be greater than or equal to 0")
        if (type(bound_2)==int and not (bound_1 <= bound_2)):
            raise AssertionError("LIST lower bound must be less than or equal to upper bound")
# set up class attributes
self._bound_1 = bound_1
self._bound_2 = bound_2
self._unique = UNIQUE
# preallocate list elements if bounds are both integers
if not self._unbounded:
list_size = bound_2 - bound_1 + 1
self._container = list_size*[None]
        # an unbounded list starts small; the container grows on demand in __setitem__
else:
self._container = [None]
def bound_1(self):
return self._bound_1
def bound_2(self):
return self._bound_2
def get_size(self):
number_of_indeterminates = self._container.count(None)
hiindex = len(self._container) - number_of_indeterminates
return INTEGER(hiindex)
def get_hiindex(self):
''' When V is a bag, list or set, the returned value is the actual number of elements in
the aggregate value.'''
number_of_indeterminates = self._container.count(None)
hiindex = len(self._container) - number_of_indeterminates
return INTEGER(hiindex)
def get_loindex(self):
return INTEGER(1)
def get_hibound(self):
hibound = self._bound_2
if type(hibound)==int:
return INTEGER(hibound)
else:
return hibound
def get_lobound(self):
lobound = self._bound_1
if type(lobound)==int:
return INTEGER(lobound)
else:
return lobound
def get_value_unique(self):
''' Return True if all items are different in the container, UNKNOWN if some items are
indeterminate, or False otherwise'''
if None in self._container:
return Unknown
if self.get_size()-len(set(self._container))>0: #some items are repeated
return False
else:
return True
    def __getitem__(self, index):
        # case bounded
        if not self._unbounded:
            if index<self._bound_1:
                raise IndexError("LIST index out of bound (lower bound is %i, passed %i)"%(self._bound_1,index))
            elif(index>self._bound_2):
                raise IndexError("LIST index out of bound (upper bound is %i, passed %i)"%(self._bound_2,index))
            else:
                value = self._container[index-self._bound_1]
                if value is None:
                    raise AssertionError("Value with index %i not defined. Please set the value first."%index)
                return value
        # case unbounded
        else:
            if index-self._bound_1 >= len(self._container):
                raise AssertionError("Value with index %i not defined. Please set the value first."%index)
            else:
                value = self._container[index-self._bound_1]
                if value is None:
                    raise AssertionError("Value with index %i not defined. Please set the value first."%index)
                return value
    def __setitem__(self, index, value):
        # case bounded
        if not self._unbounded:
            if index<self._bound_1:
                raise IndexError("LIST index out of bound (lower bound is %i, passed %i)"%(self._bound_1,index))
            elif(index>self._bound_2):
                raise IndexError("LIST index out of bound (upper bound is %i, passed %i)"%(self._bound_2,index))
            else:
                # first check the type of the value
                check_type(value,self.get_type())
                # then check if the value is already in the list
                if self._unique:
                    if value in self._container:
                        raise AssertionError("UNIQUE keyword prevents inserting this instance.")
                self._container[index-self._bound_1] = value
        # case unbounded
        else:
            if index<self._bound_1:
                raise IndexError("LIST index out of bound (lower bound is %i, passed %i)"%(self._bound_1,index))
            # if the _container list is already large enough, behave like the bounded case
            if (index-self._bound_1<len(self._container)):
                # first check the type of the value
                check_type(value,self.get_type())
                # then check if the value is already in the list
                if self._unique:
                    if value in self._container:
                        raise AssertionError("UNIQUE keyword prevents inserting this instance.")
                self._container[index-self._bound_1] = value
            # otherwise, extend the base _container list first
            else:
                delta_size = (index-self._bound_1) - len(self._container) + 1
                # create a list of None values and extend the container
                list_extension = delta_size*[None]
                self._container.extend(list_extension)
                # first check the type of the value
                check_type(value,self.get_type())
                # then check if the value is already in the list
                if self._unique:
                    if value in self._container:
                        raise AssertionError("UNIQUE keyword prevents inserting this instance.")
                self._container[index-self._bound_1] = value
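# A minimal usage sketch (not part of the original module): an unbounded LIST
# (bound_2 passed as None) grows its backing container on demand when an index
# beyond the current size is assigned. It assumes INTEGER instances pass
# check_type against the list's base type.
def _list_usage_sketch():
    values = LIST(0, None, INTEGER)
    values[5] = INTEGER(42)  # extends _container with None padding first
    return values[5]         # defined slots can be read back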
class BAG(BaseType.Type, BaseType.Aggregate):
"""
EXPRESS definition:
==================
A bag data type has as its domain unordered collections of like elements. The optional lower
and upper bounds, which are integer-valued expressions, define the minimum and maximum
number of elements that can be held in the collection defined by a bag data type.
Syntax:
170 bag_type = BAG [ bound_spec ] OF base_type .
176 bound_spec = '[' bound_1 ':' bound_2 ']' .
174 bound_1 = numeric_expression .
175 bound_2 = numeric_expression .
171 base_type = aggregation_types | simple_types | named_types .
Rules and restrictions:
a) The bound_1 expression shall evaluate to an integer value greater than or equal to
zero. It gives the lower bound, which is the minimum number of elements that can be in a
bag value of this data type. bound_1 shall not produce the indeterminate (?) value.
b) The bound_2 expression shall evaluate to an integer value greater than or equal to
bound_1, or an indeterminate (?) value. It gives the upper bound, which is the maximum
number of elements that can be in a bag value of this data type.
    If this value is indeterminate (?) the number of elements in a bag value of this data type is
    not bounded from above.
c) If the bound_spec is omitted, the limits are [0:?].
    EXAMPLE 29: This example defines an attribute as a bag of points (where point is a named data
    type assumed to have been declared elsewhere).
a_bag_of_points : BAG OF point;
The value of the attribute named a_bag_of_points can contain zero or more points. The same
point instance may appear more than once in the value of a_bag_of_points.
If the value is required to contain at least one element, the specification can provide a lower bound,
as in:
a_bag_of_points : BAG [1:?] OF point;
The value of the attribute named a_bag_of_points now must contain at least one point.
Python definition:
==================
@TODO
"""
def __init__( self , bound_1 , bound_2 , base_type , scope = None):
BaseType.Type.__init__(self, base_type, scope)
        if not type(bound_1)==int:
            raise TypeError("BAG lower bound must be an integer")
        # bound_2 can be set to None
        self._unbounded = False
        if bound_2 is None:
            self._unbounded = True
        elif not type(bound_2)==int:
            raise TypeError("BAG upper bound must be an integer")
        if not bound_1>=0:
            raise AssertionError("BAG lower bound must be greater than or equal to 0")
        if (type(bound_2)==int and not (bound_1 <= bound_2)):
            raise AssertionError("BAG lower bound must be less than or equal to upper bound")
# set up class attributes
self._bound_1 = bound_1
self._bound_2 = bound_2
self._container = []
def bound_1(self):
return self._bound_1
def bound_2(self):
return self._bound_2
def add(self,value):
'''
Adds a value to the bag
'''
if self._unbounded:
check_type(value,self.get_type())
self._container.append(value)
else:
            # first ensure that the bag is not full; per the EXPRESS rules above,
            # bound_2 is the maximum number of elements a bounded BAG may hold
            if len(self._container) >= self._bound_2:
                raise AssertionError('BAG is full. Impossible to add any more items')
else:
check_type(value,self.get_type())
self._container.append(value)
def get_size(self):
''' When V is a bag, list or set, the returned value is the actual number of elements in
the aggregate value.'''
return INTEGER(len(self._container))
def get_hiindex(self):
''' When V is a bag, list or set, the returned value is the actual number of elements in
the aggregate value.'''
return INTEGER(len(self._container))
def get_loindex(self):
return INTEGER(1)
def get_hibound(self):
hibound = self._bound_2
if type(hibound)==int:
return INTEGER(hibound)
else:
return hibound
def get_lobound(self):
lobound = self._bound_1
if type(lobound)==int:
return INTEGER(lobound)
else:
return lobound
def get_value_unique(self):
''' Return True if all items are different in the container, UNKNOWN if some items are
indeterminate, or False otherwise'''
if None in self._container:
return Unknown
if self.get_size()-len(set(self._container))>0: #some items are repeated
return False
else:
return True
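# A minimal usage sketch (not part of the original module): a BAG keeps a
# plain Python list internally, so the same instance may be added repeatedly
# and get_size() reports the actual element count. POINT and a_point are
# hypothetical placeholders for a named type and one of its instances.
def _bag_usage_sketch(POINT, a_point):
    a_bag_of_points = BAG(0, None, POINT)
    a_bag_of_points.add(a_point)
    a_bag_of_points.add(a_point)       # duplicates are allowed in a BAG
    return a_bag_of_points.get_size()  # INTEGER(2)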
class SET(BaseType.Type, BaseType.Aggregate):
"""
EXPRESS definition:
==================
A set data type has as its domain unordered collections of like elements. The set data type is
a specialization of the bag data type. The optional lower and upper bounds, which are integer-
valued expressions, define the minimum and maximum number of elements that can be held in
the collection defined by a set data type. The collection defined by set data type shall not
contain two or more elements which are instance equal.
Syntax:
285 set_type = SET [ bound_spec ] OF base_type .
176 bound_spec = '[' bound_1 ':' bound_2 ']' .
174 bound_1 = numeric_expression .
175 bound_2 = numeric_expression .
171 base_type = aggregation_types | simple_types | named_types .
Rules and restrictions:
a) The bound_1 expression shall evaluate to an integer value greater than or equal to
zero. It gives the lower bound, which is the minimum number of elements that can be in a
set value of this data type. bound_1 shall not produce the indeterminate (?) value.
b) The bound_2 expression shall evaluate to an integer value greater than or equal to
bound_1, or an indeterminate (?) value. It gives the upper bound, which is the maximum
number of elements that can be in a set value of this data type.
    If this value is indeterminate (?) the number of elements in a set value of this data type is
    not bounded from above.
c) If the bound_spec is omitted, the limits are [0:?].
d) Each element in an occurrence of a set data type shall be different from (i.e., not
instance equal to) every other element in the same set value.
    EXAMPLE 30: This example defines an attribute as a set of points (a named data type assumed
to have been declared elsewhere).
a_set_of_points : SET OF point;
The attribute named a_set_of_points can contain zero or more points. Each point instance (in
the set value) is required to be different from every other point in the set.
If the value is required to have no more than 15 points, the specification can provide an upper bound,
as in:
a_set_of_points : SET [0:15] OF point;
The value of the attribute named a_set_of_points now may contain no more than 15 points.
Python definition:
==================
The difference with the BAG class is that the base container for SET is a set object.
"""
def __init__( self , bound_1 , bound_2 , base_type , scope = None):
BaseType.Type.__init__(self, base_type, scope)
        if not type(bound_1)==int:
            raise TypeError("SET lower bound must be an integer")
        # bound_2 can be set to None
        self._unbounded = False
        if bound_2 is None:
            self._unbounded = True
        elif not type(bound_2)==int:
            raise TypeError("SET upper bound must be an integer")
        if not bound_1>=0:
            raise AssertionError("SET lower bound must be greater than or equal to 0")
        if (type(bound_2)==int and not (bound_1 <= bound_2)):
            raise AssertionError("SET lower bound must be less than or equal to upper bound")
# set up class attributes
self._bound_1 = bound_1
self._bound_2 = bound_2
self._container = set()
def bound_1(self):
return self._bound_1
def bound_2(self):
return self._bound_2
    def add(self,value):
        '''
        Adds a value to the set
        '''
        if self._unbounded:
            check_type(value,self.get_type())
            self._container.add(value)
        else:
            # first ensure that the set is not full; per the EXPRESS rules above,
            # bound_2 is the maximum number of elements a bounded SET may hold
            if len(self._container) >= self._bound_2:
                # adding an existing element leaves a set unchanged, so only
                # reject values that are genuinely new
                if not value in self._container:
                    raise AssertionError('SET is full. Impossible to add any more items')
            else:
                check_type(value,self.get_type())
                self._container.add(value)
def get_size(self):
''' When V is a bag, list or set, the returned value is the actual number of elements in
the aggregate value.'''
return INTEGER(len(self._container))
def get_hiindex(self):
''' When V is a bag, list or set, the returned value is the actual number of elements in
the aggregate value.'''
return INTEGER(len(self._container))
def get_loindex(self):
return INTEGER(1)
def get_hibound(self):
hibound = self._bound_2
if type(hibound)==int:
return INTEGER(hibound)
else:
return hibound
def get_lobound(self):
lobound = self._bound_1
if type(lobound)==int:
return INTEGER(lobound)
else:
return lobound
def get_value_unique(self):
''' Return True if all items are different in the container, UNKNOWN if some items are
indeterminate, or False otherwise'''
if None in self._container:
return Unknown
else:
return True
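# A minimal usage sketch (not part of the original module): because SET stores
# its elements in a Python set, re-adding an element is a no-op, which is how
# the EXPRESS instance-uniqueness rule is enforced. POINT and a_point are
# hypothetical placeholders for a named type and one of its instances.
def _set_usage_sketch(POINT, a_point):
    a_set_of_points = SET(0, 15, POINT)
    a_set_of_points.add(a_point)
    a_set_of_points.add(a_point)       # second add leaves the set unchanged
    return a_set_of_points.get_size()  # INTEGER(1)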
|
lgpl-2.1
|
CSC301H-Fall2013/JuakStore
|
site-packages/build/lib/django/core/management/__init__.py
|
99
|
18224
|
import collections
import os
import sys
from optparse import OptionParser, NO_DEFAULT
import imp
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError, handle_default_options
from django.core.management.color import color_style
from django.utils.importlib import import_module
from django.utils._os import upath
from django.utils import six
# For backwards compatibility: get_version() used to be in this module.
from django import get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
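# Illustrative sketch (hypothetical layout): for a management directory
# containing commands/{__init__.py, runserver.py, shell.py}, find_commands()
# returns ['runserver', 'shell'] -- the '.py' suffix is stripped and
# underscore-prefixed modules such as __init__.py are skipped.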
def find_management_module(app_name):
"""
Determines the path to the management module for the given app_name,
without actually importing the application or the management module.
Raises ImportError if the management module cannot be found for any reason.
"""
parts = app_name.split('.')
parts.append('management')
parts.reverse()
part = parts.pop()
path = None
# When using manage.py, the project module is added to the path,
# loaded, then removed from the path. This means that
# testproject.testapp.models can be loaded in future, even if
# testproject isn't in the path. When looking for the management
# module, we need look for the case where the project name is part
# of the app_name but the project directory itself isn't on the path.
try:
f, path, descr = imp.find_module(part, path)
except ImportError as e:
if os.path.basename(os.getcwd()) != part:
raise e
else:
if f:
f.close()
while parts:
part = parts.pop()
f, path, descr = imp.find_module(part, path and [path] or None)
if f:
f.close()
return path
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
global _commands
if _commands is None:
_commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])
# Find the installed apps
from django.conf import settings
try:
apps = settings.INSTALLED_APPS
except ImproperlyConfigured:
# Still useful for commands that do not require functional settings,
# like startproject or help
apps = []
# Find and load the management module for each installed app.
for app_name in apps:
try:
path = find_management_module(app_name)
_commands.update(dict([(name, app_name)
for name in find_commands(path)]))
except ImportError:
pass # No management module - ignore this app
return _commands
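# Illustrative sketch (hypothetical app name): the mapping returned above
# looks like {'runserver': 'django.core', 'mycommand': 'myapp'}, so a name
# can later be resolved with load_command_class('myapp', 'mycommand') --
# unless an instantiated BaseCommand was stored in place of the app name.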
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
except KeyError:
raise CommandError("Unknown command: %r" % name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
# Grab out a list of defaults from the options. optparse does this for us
# when the script runs from the command line, but since call_command can
    # be called programmatically, we need to simulate the loading and handling
# of defaults (see #10080 for details).
defaults = {}
for opt in klass.option_list:
if opt.default is NO_DEFAULT:
defaults[opt.dest] = None
else:
defaults[opt.dest] = opt.default
defaults.update(options)
return klass.execute(*args, **defaults)
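# Illustrative sketch, assuming a hypothetical command whose option_list
# defines --verbosity with default '1':
#     call_command('mycommand', 'arg1', verbosity='2')
# first builds defaults = {'verbosity': '1', ...} from the option_list, then
# merges in the caller's options, so execute('arg1', verbosity='2', ...) runs.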
class LaxOptionParser(OptionParser):
"""
An option parser that doesn't raise any errors on unknown options.
This is needed because the --settings and --pythonpath options affect
the commands (and thus the options) that are available to the user.
"""
def error(self, msg):
pass
def print_help(self):
"""Output nothing.
The lax options are included in the normal option parser, so under
normal usage, we don't need to print the lax options.
"""
pass
def print_lax_help(self):
"""Output the basic options available to every command.
This just redirects to the default print_help() behavior.
"""
OptionParser.print_help(self)
def _process_args(self, largs, rargs, values):
"""
Overrides OptionParser._process_args to exclusively handle default
options and ignore args and other options.
        This overrides the behavior of the super class, which stops parsing
        at the first unrecognized option.
"""
while rargs:
arg = rargs[0]
try:
if arg[0:2] == "--" and len(arg) > 2:
# process a single long option (possibly with value(s))
# the superclass code pops the arg off rargs
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
# the superclass code pops the arg off rargs
self._process_short_opts(rargs, values)
else:
# it's either a non-default option or an arg
# either way, add it to the args list so we can keep
# dealing with options
del rargs[0]
raise Exception
except:
largs.append(arg)
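# Illustrative sketch: parsing ['manage.py', 'runserver', '--settings=prod']
# with this parser extracts settings='prod' while leaving 'manage.py' and
# 'runserver' untouched in the positional args -- exactly what execute()
# needs before the real subcommand parser is built.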
class ManagementUtility(object):
"""
Encapsulates the logic of the django-admin.py and manage.py utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
def main_help_text(self, commands_only=False):
"""
Returns the script's main help text, as a string.
"""
if commands_only:
usage = sorted(get_commands().keys())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = collections.defaultdict(lambda: [])
for name, app in six.iteritems(get_commands()):
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict.keys()):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin.py" or "manage.py") if it can't be found.
"""
try:
app_name = get_commands()[subcommand]
except KeyError:
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
(subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMPREPLY` variable
        and treated as completion suggestions. `COMPREPLY` expects a
        space-separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
        man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
        stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword-1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
try:
from django.conf import settings
# Get the last part of the dotted path as the app name.
options += [(a.split('.')[-1], 0) for a in settings.INSTALLED_APPS]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
subcommand_cls.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
options = [opt for opt in options if opt[0] not in prev_opts]
# filter options by current input
options = sorted([(k, v) for k, v in options if k.startswith(curr)])
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
sys.exit(1)
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
version=get_version(),
option_list=BaseCommand.option_list)
self.autocomplete()
try:
options, args = parser.parse_args(self.argv)
handle_default_options(options)
except:
pass # Ignore any option errors at this point.
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
if subcommand == 'help':
if len(args) <= 2:
parser.print_lax_help()
sys.stdout.write(self.main_help_text() + '\n')
elif args[2] == '--commands':
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
else:
self.fetch_command(args[2]).print_help(self.prog_name, args[2])
elif subcommand == 'version':
sys.stdout.write(parser.get_version() + '\n')
# Special-cases: We want 'django-admin.py --version' and
# 'django-admin.py --help' to work, for backwards compatibility.
elif self.argv[1:] == ['--version']:
# LaxOptionParser already takes care of printing the version.
pass
elif self.argv[1:] in (['--help'], ['-h']):
parser.print_lax_help()
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def setup_environ(settings_mod, original_settings_path=None):
"""
Configures the runtime environment. This can also be used by external
scripts wanting to set up a similar environment to manage.py.
Returns the project directory (assuming the passed settings module is
directly in the project directory).
The "original_settings_path" parameter is optional, but recommended, since
trying to work out the original path from the module can be problematic.
"""
warnings.warn(
"The 'setup_environ' function is deprecated, "
"you likely need to update your 'manage.py'; "
"please see the Django 1.4 release notes "
"(https://docs.djangoproject.com/en/dev/releases/1.4/).",
DeprecationWarning)
# Add this project to sys.path so that it's importable in the conventional
# way. For example, if this file (manage.py) lives in a directory
# "myproject", this code would add "/path/to/myproject" to sys.path.
if '__init__.py' in upath(settings_mod.__file__):
p = os.path.dirname(upath(settings_mod.__file__))
else:
p = upath(settings_mod.__file__)
project_directory, settings_filename = os.path.split(p)
if project_directory == os.curdir or not project_directory:
project_directory = os.getcwd()
project_name = os.path.basename(project_directory)
# Strip filename suffix to get the module name.
settings_name = os.path.splitext(settings_filename)[0]
# Strip $py for Jython compiled files (like settings$py.class)
if settings_name.endswith("$py"):
settings_name = settings_name[:-3]
# Set DJANGO_SETTINGS_MODULE appropriately.
if original_settings_path:
os.environ['DJANGO_SETTINGS_MODULE'] = original_settings_path
else:
# If DJANGO_SETTINGS_MODULE is already set, use it.
os.environ['DJANGO_SETTINGS_MODULE'] = os.environ.get(
'DJANGO_SETTINGS_MODULE',
'%s.%s' % (project_name, settings_name)
)
# Import the project module. We add the parent directory to PYTHONPATH to
# avoid some of the path errors new users can have.
sys.path.append(os.path.join(project_directory, os.pardir))
import_module(project_name)
sys.path.pop()
return project_directory
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
def execute_manager(settings_mod, argv=None):
"""
Like execute_from_command_line(), but for use by manage.py, a
project-specific django-admin.py utility.
"""
warnings.warn(
"The 'execute_manager' function is deprecated, "
"you likely need to update your 'manage.py'; "
"please see the Django 1.4 release notes "
"(https://docs.djangoproject.com/en/dev/releases/1.4/).",
DeprecationWarning)
setup_environ(settings_mod)
utility = ManagementUtility(argv)
utility.execute()
|
mit
|
credativ/pulp
|
server/pulp/server/db/model/criteria.py
|
7
|
15518
|
from types import NoneType
import copy
import re
import sys
import pymongo
from pulp.common.dateutils import parse_iso8601_datetime
from pulp.server import exceptions as pulp_exceptions
from pulp.server.db.model.base import Model
class Criteria(Model):
def __init__(self, filters=None, sort=None, limit=None, skip=None, fields=None):
super(Criteria, self).__init__()
assert isinstance(filters, (dict, NoneType))
assert isinstance(sort, (list, tuple, NoneType))
assert isinstance(limit, (int, NoneType))
assert isinstance(skip, (int, NoneType))
assert isinstance(fields, (list, tuple, NoneType))
self.filters = filters
self.sort = sort
self.limit = limit
self.skip = skip
self.fields = fields
def as_dict(self):
"""
@return: the Criteria as a dict, suitable for serialization by
something like JSON, and compatible as input to the
from_dict method.
@rtype: dict
"""
return {
'filters': self.filters,
'sort': self.sort,
'limit': self.limit,
'skip': self.skip,
'fields': self.fields
}
@classmethod
def from_client_input(cls, doc):
"""
Accept input provided by a client (such as through a GET or POST
request), validate that the provided data is part of a Criteria
definition, and ensure that no additional data is present.
@param doc: a dict including only data that corresponds to attributes
of a Criteria object
@type doc: dict
@return: new Criteria instance based on provided data
@rtype: pulp.server.db.model.criteria.Criteria
"""
if not isinstance(doc, dict):
            # no exception is active at this point, so a plain raise suffices
            raise pulp_exceptions.InvalidValue(['criteria'])
doc = copy.copy(doc)
filters = _validate_filters(doc.pop('filters', None))
sort = _validate_sort(doc.pop('sort', None))
limit = _validate_limit(doc.pop('limit', None))
skip = _validate_skip(doc.pop('skip', None))
fields = _validate_fields(doc.pop('fields', None))
if doc:
raise pulp_exceptions.InvalidValue(doc.keys())
DateOperator.apply(filters)
return cls(filters, sort, limit, skip, fields)
@classmethod
def from_dict(cls, input_dictionary):
"""
Convert a dictionary representation of the Criteria into a new Criteria object. The output
of as_dict() is suitable as input to this method.
:param input_dictionary: The dictionary representation of a Criteria object that will be
used to construct one.
:type input_dictionary: dict
:return: A new Criteria object
:rtype: Criteria
"""
return cls(input_dictionary['filters'], input_dictionary['sort'], input_dictionary['limit'],
input_dictionary['skip'], input_dictionary['fields'])
@property
def spec(self):
if self.filters is None:
return None
spec = copy.copy(self.filters)
_compile_regexs_for_not(spec)
return spec
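# A minimal round-trip sketch (not part of the original module): as_dict()
# and from_dict() are inverses, which is what makes Criteria safe to
# serialize (e.g. to JSON) and rebuild on the receiving side.
def _criteria_roundtrip_sketch():
    c1 = Criteria(filters={'id': {'$in': ['a', 'b']}}, limit=10)
    c2 = Criteria.from_dict(c1.as_dict())
    assert c2.filters == c1.filters and c2.limit == c1.limit
    return c2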
class UnitAssociationCriteria(Model):
# Shadowed here for convenience
SORT_ASCENDING = pymongo.ASCENDING
SORT_DESCENDING = pymongo.DESCENDING
def __init__(self, type_ids=None, association_filters=None, unit_filters=None,
association_sort=None, unit_sort=None, limit=None, skip=None,
association_fields=None, unit_fields=None, remove_duplicates=False):
"""
There are a number of entry points into creating one of these instances:
multiple REST interfaces, the plugins, etc. As such, this constructor
does quite a bit of validation on the parameter values.
@param type_ids: list of types to search
@type type_ids: [str]
@param association_filters: mongo spec describing search parameters on
association metadata
@type association_filters: dict
@param unit_filters: mongo spec describing search parameters on unit
metadata; only used when a single type ID is specified
@type unit_filters: dict
@param association_sort: ordered list of fields and directions; may only
contain association metadata
@type association_sort: [(str, <SORT_* constant>)]
@param unit_sort: ordered list of fields and directions; only used when
a single type ID is specified
@type unit_sort: [(str, <SORT_* constant>)]
@param limit: maximum number of results to return
@type limit: int
@param skip: number of results to skip
@type skip: int
@param association_fields: if specified, only the given fields from the
association's metadata will be included in returned units
@type association_fields: list of str
@param unit_fields: if specified, only the given fields from the unit's
metadata are returned; only applies when a single type ID is
specified
@type unit_fields: list of str
@param remove_duplicates: if True, units with multiple associations will
only return a single association; defaults to False
@type remove_duplicates: bool
"""
super(UnitAssociationCriteria, self).__init__()
# A default instance will be used in the case where no criteria is
# passed in, so use sane defaults here.
if type_ids is not None and not isinstance(type_ids, (list, tuple)):
type_ids = [type_ids]
self.type_ids = type_ids
self.association_filters = association_filters or {}
self.unit_filters = unit_filters or {}
self.association_sort = association_sort
self.unit_sort = unit_sort
self.limit = limit
self.skip = skip
# The unit_id and unit_type_id are required as association returned data;
# frankly it doesn't make sense without them but it's also a technical
# requirement for the algorithm to run. Make sure they are there.
if association_fields is not None:
if 'unit_id' not in association_fields:
association_fields.append('unit_id')
if 'unit_type_id' not in association_fields:
association_fields.append('unit_type_id')
self.association_fields = association_fields
self.unit_fields = unit_fields
self.remove_duplicates = remove_duplicates
@classmethod
def from_client_input(cls, query):
"""
Parses a unit association query document and assembles a corresponding
internal criteria object.
Example:
{
"type_ids" : ["rpm"],
"filters" : {
"unit" : <mongo spec syntax>,
"association" : <mongo spec syntax>
},
"sort" : {
"unit" : [ ["name", "ascending"], ["version", "descending"] ],
"association" : [ ["created", "descending"] ]
},
"limit" : 100,
"skip" : 200,
"fields" : {
"unit" : ["name", "version", "arch"],
"association" : ["created"]
},
"remove_duplicates" : True
}
@param query: user-provided query details
@type query: dict
@return: criteria object for the unit association query
@rtype: L{UnitAssociationCriteria}
@raises ValueError: on an invalid value in the query
"""
query = copy.copy(query)
type_ids = query.pop('type_ids', None)
filters = query.pop('filters', None)
if filters is None:
association_filters = None
unit_filters = None
else:
association_filters = _validate_filters(filters.pop('association', None))
unit_filters = _validate_filters(filters.pop('unit', None))
sort = query.pop('sort', None)
if sort is None:
association_sort = None
unit_sort = None
else:
association_sort = _validate_sort(sort.pop('association', None))
unit_sort = _validate_sort(sort.pop('unit', None))
limit = _validate_limit(query.pop('limit', None))
skip = _validate_skip(query.pop('skip', None))
fields = query.pop('fields', None)
if fields is None:
association_fields = None
unit_fields = None
else:
association_fields = _validate_fields(fields.pop('association', None))
unit_fields = _validate_fields(fields.pop('unit', None))
remove_duplicates = bool(query.pop('remove_duplicates', False))
# report any superfluous doc key, value pairs as errors
for d in (query, filters, sort, fields):
if d:
raise pulp_exceptions.InvalidValue(d.keys())
# These are here for backward compatibility, in the future, these
# should be removed and the corresponding association_spec and unit_spec
# properties should be used
if association_filters:
_compile_regexs_for_not(association_filters)
if unit_filters:
_compile_regexs_for_not(unit_filters)
return cls(type_ids=type_ids, association_filters=association_filters,
unit_filters=unit_filters, association_sort=association_sort,
unit_sort=unit_sort, limit=limit, skip=skip,
association_fields=association_fields, unit_fields=unit_fields,
remove_duplicates=remove_duplicates)
@property
def association_spec(self):
if self.association_filters is None:
return None
association_spec = copy.copy(self.association_filters)
_compile_regexs_for_not(association_spec)
return association_spec
@property
def unit_spec(self):
if self.unit_filters is None:
return None
unit_spec = copy.copy(self.unit_filters)
_compile_regexs_for_not(unit_spec)
return unit_spec
def __str__(self):
s = ''
if self.type_ids:
s += 'Type IDs [%s] ' % self.type_ids
if self.association_filters:
s += 'Assoc Filters [%s] ' % self.association_filters
if self.unit_filters is not None:
s += 'Unit Filters [%s] ' % self.unit_filters
if self.association_sort is not None:
s += 'Assoc Sort [%s] ' % self.association_sort
if self.unit_sort is not None:
s += 'Unit Sort [%s] ' % self.unit_sort
if self.limit:
s += 'Limit [%s] ' % self.limit
if self.skip:
s += 'Skip [%s] ' % self.skip
if self.association_fields:
s += 'Assoc Fields [%s] ' % self.association_fields
if self.unit_fields:
s += 'Unit Fields [%s] ' % self.unit_fields
s += 'Remove Duplicates [%s]' % self.remove_duplicates
return s
def _validate_filters(filters):
if filters is None:
return None
if not isinstance(filters, dict):
raise pulp_exceptions.InvalidValue(['filters'])
return filters
def _validate_sort(sort):
"""
@type sort: list, tuple
@rtype: tuple
"""
if sort is None:
return None
    if not isinstance(sort, (list, tuple)):
        raise pulp_exceptions.InvalidValue(['sort'])
try:
valid_sort = []
for entry in sort:
if not isinstance(entry[0], basestring):
raise TypeError('Invalid field name [%s]' % str(entry[0]))
flag = str(entry[1]).lower()
direction = None
if flag in ('ascending', '1'):
direction = pymongo.ASCENDING
if flag in ('descending', '-1'):
direction = pymongo.DESCENDING
if direction is None:
raise ValueError('Invalid sort direction [%s]' % flag)
valid_sort.append((entry[0], direction))
except (TypeError, ValueError):
raise pulp_exceptions.InvalidValue(['sort']), None, sys.exc_info()[2]
else:
return valid_sort
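# Illustrative sketch: _validate_sort([('name', 'ascending'), ('id', '-1')])
# returns [('name', pymongo.ASCENDING), ('id', pymongo.DESCENDING)]; any
# direction outside {'ascending', 'descending', '1', '-1'} raises
# InvalidValue(['sort']).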
def _validate_limit(limit):
    if isinstance(limit, bool):
        raise pulp_exceptions.InvalidValue(['limit'])
if limit is None:
return None
try:
limit = int(limit)
if limit < 1:
raise TypeError()
except (TypeError, ValueError):
raise pulp_exceptions.InvalidValue(['limit']), None, sys.exc_info()[2]
else:
return limit
def _validate_skip(skip):
    if isinstance(skip, bool):
        raise pulp_exceptions.InvalidValue(['skip'])
if skip is None:
return None
try:
skip = int(skip)
if skip < 0:
raise TypeError()
except (TypeError, ValueError):
raise pulp_exceptions.InvalidValue(['skip']), None, sys.exc_info()[2]
else:
return skip
def _validate_fields(fields):
if fields is None:
return None
try:
if isinstance(fields, (basestring, dict)):
raise TypeError
fields = list(fields)
for f in fields:
if not isinstance(f, basestring):
raise TypeError()
except TypeError:
raise pulp_exceptions.InvalidValue(['fields']), None, sys.exc_info()[2]
return fields
def _compile_regexs_for_not(spec):
if not isinstance(spec, (dict, list, tuple)):
return
if isinstance(spec, (list, tuple)):
map(_compile_regexs_for_not, spec)
return
for key, value in spec.items():
if key == '$not' and isinstance(value, basestring):
spec[key] = re.compile(value)
_compile_regexs_for_not(value)
class DateOperator(object):
"""
The ``$date`` operator.
This operator is used to convert a ISO-8601 date string into a
python datetime object.
For example, this query:
{"created": {"$date": "2015-01-01T00:00:00Z"}}
is translated to:
{"created": <datetime>}
"""
KEY = '$date'
@staticmethod
def apply(query):
"""
Apply the operator translation to the query.
:param query: A database query.
:type query: dict
"""
if not query:
return
for key, value in query.items():
matched, translated = DateOperator.translate(value)
if matched:
query[key] = translated
continue
if not isinstance(value, dict):
continue
DateOperator.apply(value)
@staticmethod
def translate(value):
"""
Translate the operator *dict* into a datetime object.
An example of matched values: {"$date": "2015-01-01T00:00:00Z"}
:param value: The value to be translated.
:type value: dict
:return: A tuple of: (matched, translated)
:rtype: tuple
"""
matched = False
translated = value
operator = DateOperator.KEY
if isinstance(value, dict) and value.keys() == [operator]:
translated = parse_iso8601_datetime(value[operator])
matched = True
return matched, translated
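# A minimal usage sketch (not part of the original module): apply() rewrites
# the query in place, descending into nested dicts.
def _date_operator_sketch():
    query = {'created': {'$date': '2015-01-01T00:00:00Z'}, 'id': 'abc'}
    DateOperator.apply(query)
    # query['created'] is now the datetime parsed from the ISO-8601 string;
    # the 'id' entry is left untouched.
    return query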
|
gpl-2.0
|
fairwaves/umtrx_scripts
|
python_lib/umtrx_lms.py
|
1
|
40513
|
#!/usr/bin/env python
#
# Copyright 2012 Fairwaves
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import socket, argparse, time, math
import umtrx_ctrl
# pylint: disable = C0301, C0103, C0111
verbosity = 0
# Constants used during PLL tuning.
VCO_HIGH = 0x02
VCO_NORM = 0x00
VCO_LOW = 0x01
FREQ_LIST = [# min, max, val
(0.2325e9, 0.285625e9, 0x27),
(0.285625e9, 0.336875e9, 0x2f),
(0.336875e9, 0.405e9, 0x37),
(0.405e9, 0.465e9, 0x3f),
(0.465e9, 0.57125e9, 0x26),
(0.57125e9, 0.67375e9, 0x2e),
(0.67375e9, 0.81e9, 0x36),
(0.81e9, 0.93e9, 0x3e),
(0.93e9, 1.1425e9, 0x25),
(1.1425e9, 1.3475e9, 0x2d),
(1.3475e9, 1.62e9, 0x35),
(1.62e9, 1.86e9, 0x3d),
(1.86e9, 2.285e9, 0x24),
(2.285e9, 2.695e9, 0x2c),
(2.695e9, 3.24e9, 0x34),
(3.24e9, 3.72e9, 0x3c)]
# LPF code to bandwidth in MHz
LPF_CODE_TO_BW = {
0x0 : 14,
0x1 : 10,
0x2 : 7,
0x3 : 6,
0x4 : 5,
0x5 : 4.375,
0x6 : 3.5,
0x7 : 3,
0x8 : 2.75,
0x9 : 2.5,
0xa : 1.92,
0xb : 1.5,
0xc : 1.375,
0xd : 1.25,
0xe : 0.875,
0xf : 0.75,
}
# LPF bandwidth in MHz to LPF code
# Just reverse LPF_CODE_TO_BW for simplicity
LPF_BW_TO_CODE = dict((v,k) for k, v in LPF_CODE_TO_BW.iteritems())
# A list of reserved registers which read as junk
RESV_REGS = (0x0C, 0x0D, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x69, 0x6A, 0x6B, 0x6C, 0x6D)
def dump(lms_dev):
return [(x, lms_dev.reg_read(x),) for x in range(0, 128) if x not in RESV_REGS]
def select_freq(freq): # test if given freq within the range and return corresponding value
l = list(filter(lambda t: True if t[0] < freq <= t[1] else False, FREQ_LIST))
return l[0][2] if len(l) else None
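# Illustrative sketch: select_freq(0.9e9) falls in the (0.81e9, 0.93e9] entry
# of FREQ_LIST and returns 0x3e, while select_freq(4e9) lies outside every
# range and returns None.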
def lms_txrx_pll_tune(lms_dev, base_reg, ref_clock, out_freq):
""" Tune Tx or RX PLL to a given frequency. Which PLL to tune is selected by
base_reg parameter: pass 0x10 for TX and 0x20 for RX. """
freqsel = select_freq(out_freq)
if freqsel is None:
print("Error: Output frequency is out of range")
return False
vco_x = 1 << ((freqsel & 0x7) - 3)
nint = int(vco_x * out_freq / ref_clock)
nfrac = int((1 << 23) * (vco_x * out_freq - nint * ref_clock) / ref_clock)
    actual_freq = (nint + nfrac/float(1<<23)) * (ref_clock/vco_x)
if verbosity > 0: print("FREQSEL=%d VCO_X=%d NINT=%d NFRAC=%d" % (freqsel, vco_x, nint, nfrac))
# Write NINT, NFRAC
lms_dev.reg_write(base_reg+0x0, (nint >> 1) & 0xff) # NINT[8:1]
lms_dev.reg_write(base_reg+0x1, ((nfrac >> 16) & 0x7f) | ((nint & 0x1) << 7)) # NINT[0] NFRAC[22:16]
lms_dev.reg_write(base_reg+0x2, (nfrac >> 8) & 0xff) # NFRAC[15:8]
lms_dev.reg_write(base_reg+0x3, (nfrac) & 0xff) # NFRAC[7:0]
# Write FREQSEL
lms_dev.reg_write_bits(base_reg+0x5, (0x3f << 2), (freqsel << 2)) # FREQSEL[5:0]
# Reset VOVCOREG, OFFDOWN to default
# -- I think this is not needed here, as it changes settings which
# we may want to set beforehand.
# lms_dev.reg_write(base_reg+0x8, 0x40) # VOVCOREG[3:1] OFFDOWN[4:0]
# lms_dev.reg_write(base_reg+0x9, 0x94) # VOVCOREG[0] VCOCAP[5:0]
# Poll VOVCO
start_i = -1
stop_i = -1
state = VCO_HIGH
for i in range(0, 64):
lms_dev.reg_write_bits(base_reg+0x9, 0x3f, i)
comp = lms_dev.reg_read(base_reg+0xa)
if comp is None:
return False
vcocap = comp >> 6
if verbosity > 1: print("VOVCO[%d]=%x" % (i, vcocap))
if VCO_HIGH == vcocap:
pass
elif VCO_LOW == vcocap:
if state == VCO_NORM:
stop_i = i - 1
state = VCO_LOW
if verbosity > 1: print("Low")
elif VCO_NORM == vcocap:
if state == VCO_HIGH:
start_i = i
state = VCO_NORM
if verbosity > 1: print("Norm")
else:
print("ERROR: Incorrect VCOCAP reading while tuning")
return False
if VCO_NORM == state:
stop_i = 63
if start_i == -1 or stop_i == -1:
print("ERROR: Can't find VCOCAP value while tuning")
return False
# Tune to the middle of the found VCOCAP range
avg_i = int((start_i + stop_i) / 2)
if verbosity > 0: print("START=%d STOP=%d SET=%d" % (start_i, stop_i, avg_i))
if verbosity > 0: print("Actual frequency: %f" % (actual_freq))
lms_dev.reg_write_bits(base_reg+0x9, 0x3f, avg_i)
return True
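# Worked example (the 26 MHz reference clock is an assumption here, not a
# value taken from this file): for out_freq = 925e6, select_freq() yields
# FREQSEL = 0x3e, so VCO_X = 1 << ((0x3e & 0x7) - 3) = 8,
# NINT = int(8 * 925e6 / 26e6) = 284 and
# NFRAC = int((1 << 23) * (8 * 925e6 - 284 * 26e6) / 26e6) = 5162220,
# which puts the synthesized frequency within about 1 Hz of the target.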
def lms_tx_pll_tune(lms_dev, ref_clock, out_freq):
""" Tune TX PLL to a given frequency. """
return lms_txrx_pll_tune(lms_dev, 0x10, ref_clock, out_freq)
def lms_rx_pll_tune(lms_dev, ref_clock, out_freq):
    """ Tune RX PLL to a given frequency. """
    return lms_txrx_pll_tune(lms_dev, 0x20, ref_clock, out_freq)
def lms_init(lms_dev):
""" INIT with default values (taken from the LMS EVB software)"""
lms_dev.reg_write(0x09, 0x00) # RXOUTSW (disabled), CLK_EN (all disabled)
lms_dev.reg_write(0x17, 0xE0)
lms_dev.reg_write(0x27, 0xE3)
lms_dev.reg_write(0x64, 0x32)
lms_dev.reg_write(0x70, 0x01)
lms_dev.reg_write(0x79, 0x37)
lms_dev.reg_write(0x59, 0x09)
lms_dev.reg_write(0x47, 0x40)
# RF Settings
lms_dev.reg_write(0x41, 0x15) # VGA1GAIN
lms_dev.reg_write(0x45, 0x00) # VGA2GAIN, ENVD
# Test settings
# lms_dev.reg_set_bits(0x35, (1<<6)) # Set BYP_EN_LPF
# lms_dev.reg_set_bits(0x09, (1<<7)) # Enable RXOUTSW
def lms_tx_enable(lms_dev):
""" Enable TX """
# STXEN: Soft transmit enable
lms_dev.reg_set_bits(0x05, (1 << 3))
# Tx DSM SPI clock enabled
lms_dev.reg_set_bits(0x09, (1 << 0))
def lms_tx_disable(lms_dev):
""" Disable TX """
# STXEN: Soft transmit enable
lms_dev.reg_clear_bits(0x05, (1 << 3))
# Tx DSM SPI clock enabled
lms_dev.reg_clear_bits(0x09, (1 << 0))
def lms_rx_enable(lms_dev):
""" Enable RX """
# SRXEN: Soft receive enable
lms_dev.reg_set_bits(0x05, (1 << 2))
# Rx DSM SPI clock enabled
lms_dev.reg_set_bits(0x09, (1 << 2))
def lms_rx_disable(lms_dev):
""" Disable RX """
# SRXEN: Soft receive enable
lms_dev.reg_clear_bits(0x05, (1 << 2))
# Rx DSM SPI clock enabled
lms_dev.reg_clear_bits(0x09, (1 << 2))
def lms_get_tx_pa(lms_dev):
""" Reurn selected Tx PA."""
return lms_dev.reg_get_bits(0x44, (0x07 << 3), 3)
def lms_set_tx_pa(lms_dev, pa):
""" Turn on selected Tx PA.
'pa' parameter is in [0..2] range, where 0 is to turn off all PAs."""
lms_dev.reg_write_bits(0x44, (0x07 << 3), (pa << 3))
def lms_get_rx_lna(lms_dev):
""" Reurn selected Rx LNA."""
# Note: We should also check register 0x25 here, but it's not clear
# what to return if 0x75 and 0x25 registers select different LNAs.
# LNASEL_RXFE[1:0]: Selects the active LNA.
return lms_dev.reg_get_bits(0x75, (0x03 << 4), 4)
def lms_set_rx_lna(lms_dev, lna):
""" Turn on selected Rx LNA.
'lna' parameter is in [0..3] range, where 0 is to turn off all LNAs."""
if not (0 <= lna <= 3): return None
# LNASEL_RXFE[1:0]: Selects the active LNA.
lms_dev.reg_write_bits(0x75, (0x03 << 4), (lna << 4))
# SELOUT[1:0]: Select output buffer in RX PLL, not used in TX PLL
lms_dev.reg_write_bits(0x25, 0x03, lna)
def lms_set_tx_vga1gain(lms_dev, gain):
""" Set Tx VGA1 gain in dB.
gain is in [-35 .. -4] dB range
Returns the old gain value on success, None on error"""
if not (-35 <= gain <= -4): return None
old_bits = lms_dev.reg_write_bits(0x41, 0x1f, 35 + gain)
return (old_bits & 0x1f) - 35
def lms_get_tx_vga1gain(lms_dev):
""" Get Tx VGA1 gain in dB.
gain is in [-35 .. -4] dB range
Returns the gain value on success, None on error"""
return lms_dev.reg_get_bits(0x41, 0x1f, 0)-35
def lms_set_tx_vga2gain(lms_dev, gain):
""" Set Tx VGA2 gain.
gain is in dB [0 .. 25]
Returns the old gain value on success, None on error"""
if not (0 <= gain <= 25): return None
old_bits = lms_dev.reg_write_bits(0x45, (0x1f << 3), (gain << 3))
return old_bits >> 3
def lms_get_tx_vga2gain(lms_dev):
""" Get Tx VGA2 gain in dB.
gain is in [0 .. 25] dB range
Returns the gain value on success, None on error"""
gain = lms_dev.reg_get_bits(0x45, (0x1f << 3), 3)
gain = gain if gain <= 25 else 25
return gain
def lms_set_rx_lna_gain(lms_dev, gain):
""" Set Rx LNA gain mode.
gain mode is:
3 - max gain
2 - mid gain (max gain-6dB)
1 - LNA bypassed
Returns the old gain value on success, None on error"""
if not (1 <= gain <= 3): return None
old_bits = lms_dev.reg_write_bits(0x75, (0x3 << 6), (gain << 6))
return (old_bits >> 6) & 0x3
def lms_get_rx_lna_gain(lms_dev):
""" Get Rx LNA gain mode.
gain mode is:
3 - max gain
2 - mid gain (max gain-6dB)
1 - LNA bypassed
Returns the gain mode value on success, None on error"""
gain = lms_dev.reg_get_bits(0x75, (0x3 << 6), 6)
return gain
def lms_set_rx_vga1gain_int(lms_dev, gain):
""" Set Rx VGA1 gain raw value.
gain is raw values [0 .. 120]
Returns the old gain value on success, None on error"""
if not (0 <= gain <= 120):
return None
old_bits = lms_dev.reg_write_bits(0x76, 0x7f, gain)
return old_bits & 0x7f
def lms_get_rx_vga1gain_int(lms_dev):
""" Get Rx VGA1 gain raw value.
gain is in [0 .. 120] range of abstract values
Returns the gain value on success, None on error"""
gain = lms_dev.reg_get_bits(0x76, 0x7f, 0)
return gain
def lms_rxvga1_int_to_db(code):
    return 5.0 + 20*math.log10(127.0/(127.0-code))
def lms_rxvga1_db_to_int(db):
return int(127.5 - 127 / pow(10, (db-5.0)/20))
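# Worked example: the raw code and the dB gain are related by
# gain_dB = 5.0 + 20*log10(127/(127 - code)), so code 0 gives 5.0 dB and
# code 120 gives 5.0 + 20*log10(127/7) ~= 30.17 dB -- the [5.0 .. 30.17]
# range quoted in the docstrings below. lms_rxvga1_db_to_int() inverts the
# curve, with the extra 0.5 rounding to the nearest integer code.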
def lms_set_rx_vga1gain(lms_dev, gain):
""" Set Rx VGA1 gain.
gain is in [5.0 .. 30.17] dB range
Returns the old gain value on success, None on error"""
code = lms_rxvga1_db_to_int(gain)
return lms_rxvga1_int_to_db(lms_set_rx_vga1gain_int(lms_dev, code))
def lms_get_rx_vga1gain(lms_dev):
""" Get Rx VGA1 gain in dB.
gain is in [5.0 .. 30.17] dB range
Returns the gain value on success, None on error"""
return lms_rxvga1_int_to_db(lms_get_rx_vga1gain_int(lms_dev))
def lms_set_rx_vga2gain(lms_dev, gain):
""" Set Rx VGA2 gain.
gain is in dB [0 .. 60]
Returns the old gain value on success, None on error"""
if not (0 <= gain <= 60): return None
old_bits = lms_dev.reg_write_bits(0x65, 0x1f, gain/3)
return (old_bits & 0x1f) * 3
def lms_get_rx_vga2gain(lms_dev):
""" Get Rx VGA2 gain in dB.
gain is in [0 .. 60] dB range
Returns the gain value on success, None on error"""
gain = lms_dev.reg_get_bits(0x65, 0x1f, 0)
gain = gain if gain <= 20 else 20
return gain * 3
def lms_set_tx_lpf_raw(lms_dev, val):
""" Set Tx LPF bandwidth raw value.
val is in [0 .. 15] range
Returns the old gain value on success, None on error"""
if not (0 <= val <= 15): return None
old_bits = lms_dev.reg_write_bits(0x34, (0x0f << 2), (int(val) << 2))
return (old_bits >> 2) & 0x0f
def lms_set_tx_lpf(lms_dev, val):
    """ Set Tx LPF bandwidth in MHz.
    val is one of the supported bandwidths in the [0.75 .. 14] MHz range
    Returns the old raw value on success, None on error"""
    return lms_set_tx_lpf_raw(lms_dev, LPF_BW_TO_CODE[val])
def lms_get_tx_lpf_raw(lms_dev):
""" Get Tx LPF bandwidth raw value.
return value is in [0 .. 15] range
Returns the gain value on success, None on error"""
value = lms_dev.reg_get_bits(0x34, (0x0f << 2), 2)
return value
def lms_get_tx_lpf(lms_dev):
    """ Get Tx LPF bandwidth in MHz.
    return value is in [0.75 .. 14] MHz range
    Returns the bandwidth value on success, None on error"""
    return LPF_CODE_TO_BW[lms_get_tx_lpf_raw(lms_dev)]
def lms_set_rx_lpf_raw(lms_dev, val):
""" Set Rx LPF bandwidth raw value.
val is in [0 .. 15] range
    Returns the old raw value on success, None on error"""
if not (0 <= val <= 15): return None
old_bits = lms_dev.reg_write_bits(0x54, (0x0f << 2), (int(val) << 2))
return (old_bits >> 2) & 0x0f
def lms_set_rx_lpf(lms_dev, val):
""" Set Rx LPF bandwidth in MHz.
    val is in [0.75 .. 14] MHz range
    Returns the old raw bandwidth code on success, None on error"""
return lms_set_rx_lpf_raw(lms_dev, LPF_BW_TO_CODE[val])
def lms_get_rx_lpf_raw(lms_dev):
""" Get Rx Rx LPF bandwidth raw value.
return value is in [0 .. 15] range
Returns the gain value on success, None on error"""
value = lms_dev.reg_get_bits(0x54, (0x0f << 2), 2)
return value
def lms_get_rx_lpf(lms_dev):
""" Get Rx Rx LPF bandwidth in MHz.
return value is in [0.75 .. 14] MHz range
Returns the gain value on success, None on error"""
return LPF_CODE_TO_BW[lms_get_rx_lpf_raw(lms_dev)]
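# Note (sketch): LPF_BW_TO_CODE and LPF_CODE_TO_BW (defined earlier in this
# tool) map bandwidth in MHz to the 4-bit register code and back; per the
# defaults used below, code 0x0f selects the narrowest 0.75MHz setting.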
def lms_set_vga1dc_i_int(lms_dev, dc_shift_int):
""" Set VGA1 DC offset, I channel
dc_shift_int is an integer representation of the DC shift [0 .. 255]
DC offset = (dc_shift_int - 128) / 16
Returns the old offset value on success, None on error"""
if not (0 <= dc_shift_int <= 255): return None
return lms_dev.reg_write_bits(0x42, 0xff, dc_shift_int)
def lms_get_vga1dc_i_int(lms_dev):
""" Get VGA1 DC offset, I channel
Returns the offset value on success, None on error"""
return lms_dev.reg_get_bits(0x42, 0xff, 0)
def lms_set_vga1dc_i(lms_dev, dc_shift):
""" Set VGA1 DC offset, I channel
    dc_shift is a DC shift in mV [-16 .. 15.9375]
Returns the old offset value on success, None on error"""
old_bits = lms_set_vga1dc_i_int(lms_dev, int(dc_shift*16 + 128))
return (float(old_bits) - 128) / 16
def lms_set_vga1dc_q_int(lms_dev, dc_shift_int):
""" Set VGA1 DC offset, Q channel
dc_shift_int is an integer representation of the DC shift [0 .. 255]
DC offset = (dc_shift_int - 128) / 16
Returns the old offset value on success, None on error"""
if not (0 <= dc_shift_int <= 255): return None
return lms_dev.reg_write_bits(0x43, 0xff, dc_shift_int)
def lms_get_vga1dc_q_int(lms_dev):
""" Get VGA1 DC offset, Q channel
Returns the offset value on success, None on error"""
return lms_dev.reg_get_bits(0x43, 0xff, 0)
def lms_set_vga1dc_q(lms_dev, dc_shift):
""" Set VGA1 DC offset, Q channel
    dc_shift is a DC shift in mV [-16 .. 15.9375]
Returns old offset value on success, None on error"""
old_bits = lms_set_vga1dc_q_int(lms_dev, int(dc_shift*16 + 128))
return (float(old_bits) - 128) / 16
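# Illustrative sketch (hypothetical helper, not used by the tool itself): the
# code-to-mV mapping from the docstrings above, i.e. 128 is zero offset and
# each LSB is 1/16 mV, so e.g. 144 -> +1.0 mV and 112 -> -1.0 mV.
def _vga1dc_int_to_mv(dc_shift_int):
    return (float(dc_shift_int) - 128) / 16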
# RF Settings for LO leakage tuning
# lms_dev.reg_write(0x41, (-4 + 35)) # VGA1GAIN
# lms_dev.reg_write(0x45, (25 << 3) | 0x0) # VGA2GAIN, ENVD
# lms_dev.reg_write(0x44, (2 << 3) | (1 << 1) | 1) # PA2 on
def lms_general_dc_calibration_loop(lms_dev, dc_addr, calibration_reg_base):
""" Programming and Calibration Guide: 4.1 General DC Calibration Procedure """
try_cnt_limit = 10
if verbosity > 0: print("DC Offset Calibration for addr %d:" % (dc_addr,))
reg_val = lms_dev.reg_read(calibration_reg_base+0x03)
# DC_ADDR := ADDR
reg_val = (reg_val & 0xf8) | dc_addr
lms_dev.reg_write(calibration_reg_base+0x03, reg_val)
# DC_START_CLBR := 1
reg_val = reg_val | (1 << 5)
lms_dev.reg_write(calibration_reg_base+0x03, reg_val)
# DC_START_CLBR := 0
reg_val = reg_val ^ (1 << 5)
lms_dev.reg_write(calibration_reg_base+0x03, reg_val)
while try_cnt_limit:
try_cnt_limit -= 1
if verbosity > 1: print("cnt=%d" % try_cnt_limit)
# Wait for 6.4(1.6) us
time.sleep(6.4e-6)
# Read DC_CLBR_DONE
reg_val = lms_dev.reg_read(calibration_reg_base+0x01)
DC_CLBR_DONE = (reg_val >> 1) & 0x1
if verbosity > 1: print(" DC_CLBR_DONE=%d" % DC_CLBR_DONE)
# DC_CLBR_DONE == 1?
if DC_CLBR_DONE == 1:
continue
# Read DC_LOCK
reg_val = lms_dev.reg_read(calibration_reg_base+0x01)
DC_LOCK = (reg_val >> 2) & 0x7
if verbosity > 1: print(" DC_LOCK=%d" % DC_LOCK)
# Read DC_REGVAL
DC_REGVAL = lms_dev.reg_read(calibration_reg_base+0x00)
if verbosity > 1: print("DC_REGVAL = %d" % DC_REGVAL)
# DC_LOCK != 0 or 7?
if DC_LOCK != 0 and DC_LOCK != 7:
# We're done.
break
# Return the value
return DC_REGVAL
def lms_general_dc_calibration(lms_dev, dc_addr, calibration_reg_base):
# This procedure is outlined in FAQ, section 4.7.
    # Its purpose is to circumvent the fact that in some edge cases calibration
    # may be successful even if DC_LOCK shows 0 or 7.
# Set DC_REGVAL to 31
lms_dev.reg_write(calibration_reg_base+0x00, 31)
# Run the calibration first time
DC_REGVAL = lms_general_dc_calibration_loop(lms_dev, dc_addr, calibration_reg_base)
    # Unchanged DC_REGVAL may mean either calibration failure or that '31' is
    # the best calibration value. We're going to re-check that.
if 31 == DC_REGVAL:
        # Set DC_REGVAL to a value other than 31, e.g. 0
lms_dev.reg_write(calibration_reg_base+0x00, 0)
# Retry the calibration
DC_REGVAL = lms_general_dc_calibration_loop(lms_dev, dc_addr, calibration_reg_base)
# If DC_REGVAL has been changed, then calibration succeeded.
if 0 == DC_REGVAL:
# PANIC: Algorithm does Not Converge!
# From LimeMicro FAQ: "[This] condition should not happen as this is being
# checked in our production test."
print("Error: DC Offset Calibration does not converge!")
return None
if verbosity > 0: print("Successful DC Offset Calibration for register bank 0x%X, DC addr %d. Result: 0x%X" \
% (calibration_reg_base, dc_addr, DC_REGVAL))
return DC_REGVAL
def lms_lpf_tuning_dc_calibration(lms_dev):
""" Programming and Calibration Guide: 4.2 DC Offset Calibration of LPF Tuning Module """
result = False
# Save TopSPI::CLK_EN[5] Register
# TopSPI::CLK_EN[5] := 1
clk_en_save = lms_dev.reg_set_bits(0x09, (1 << 5))
# Perform DC Calibration Procedure in TopSPI with ADDR := 0 and get Result
# DCCAL := TopSPI::DC_REGVAL
DCCAL = lms_general_dc_calibration(lms_dev, 0, 0x0)
if DCCAL is not None:
# RxLPFSPI::DCO_DACCAL := DCCAL
lms_dev.reg_write_bits(0x35, 0x3f, DCCAL)
# TxLPFSPI::DCO_DACCAL := DCCAL
lms_dev.reg_write_bits(0x55, 0x3f, DCCAL)
# Success
result = True
# Restore TopSPI::CLK_EN[5] Register
lms_dev.reg_write(0x09, clk_en_save)
return result
def lms_txrx_lpf_dc_calibration(lms_dev, is_tx):
""" Programming and Calibration Guide: 4.3 TX/RX LPF DC Offset Calibration """
# Determine base address for control registers
control_reg_base = 0x30 if is_tx else 0x50
# Save TopSPI::CLK_EN Register
# TopSPI::CLK_EN := 1
clk_en_save = lms_dev.reg_set_bits(0x09, (1 << 1) if is_tx else (1 << 3))
# Perform DC Calibration Procedure in LPFSPI with ADDR := 0 (For channel I)
result = lms_general_dc_calibration(lms_dev, 0, control_reg_base) is not None
# Perform DC Calibration Procedure in LPFSPI with ADDR := 1 (For channel Q)
result = lms_general_dc_calibration(lms_dev, 1, control_reg_base) is not None and result
# Restore TopSPI::CLK_EN Register
lms_dev.reg_write(0x09, clk_en_save)
return result
def lms_rxvga2_dc_calibration(lms_dev):
""" Programming and Calibration Guide: 4.4 RXVGA2 DC Offset Calibration """
# Set base address for control registers
control_reg_base = 0x60
# Save TopSPI::CLK_EN Register
# TopSPI::CLK_EN := 1
clk_en_save = lms_dev.reg_set_bits(0x09, (1 << 4))
# Perform DC Calibration Procedure in RxVGA2SPI with ADDR := 0 (For DC Reference channel)
result = lms_general_dc_calibration(lms_dev, 0, control_reg_base) is not None
# Perform DC Calibration Procedure in RxVGA2SPI with ADDR := 1 (For VGA2A_I channel)
result = lms_general_dc_calibration(lms_dev, 1, control_reg_base) is not None and result
# Perform DC Calibration Procedure in RxVGA2SPI with ADDR := 2 (For VGA2A_Q channel)
result = lms_general_dc_calibration(lms_dev, 2, control_reg_base) is not None and result
# Perform DC Calibration Procedure in RxVGA2SPI with ADDR := 3 (For VGA2B_I channel)
result = lms_general_dc_calibration(lms_dev, 3, control_reg_base) is not None and result
# Perform DC Calibration Procedure in RxVGA2SPI with ADDR := 4 (For VGA2B_Q channel)
result = lms_general_dc_calibration(lms_dev, 4, control_reg_base) is not None and result
# Restore TopSPI::CLK_EN Register
lms_dev.reg_write(0x09, clk_en_save)
return result
def lms_lpf_bandwidth_tuning(lms_dev, ref_clock, lpf_bandwidth_code):
""" Programming and Calibration Guide: 4.5 LPF Bandwidth Tuning.
    Note that this function modifies Tx PLL settings. """
# Save registers 0x05 and 0x09, because we will modify them during lms_tx_enable()
reg_save_05 = lms_dev.reg_read(0x05)
reg_save_09 = lms_dev.reg_read(0x09)
# Enable TxPLL and tune it to 320MHz
lms_tx_enable(lms_dev)
lms_tx_pll_tune(lms_dev, ref_clock, int(320e6))
    # Use the 40MHz clock generated from TxPLL: TopSPI::CLKSEL_LPFCAL := 0
# Power Up LPF tuning clock generation block: TopSPI::PD_CLKLPFCAL := 0
reg_save_06 = lms_dev.reg_clear_bits(0x06, (1 << 3) | (1 << 2))
# Set TopSPI::BWC_LPFCAL
# Set EN_CAL_LPFCAL := 1 (Block enabled)
t = lms_dev.reg_write_bits(0x07, 0x8f, (1<<7)|lpf_bandwidth_code)
if verbosity >= 3: print("code = %x %x %x" % (lpf_bandwidth_code, t, lms_dev.reg_read(0x07)))
# TopSPI::RST_CAL_LPFCAL := 1 (Rst Active)
lms_dev.reg_set_bits(0x06, (1 << 0))
# ...Delay 100ns...
# TopSPI::RST_CAL_LPFCAL := 0 (Rst Inactive)
lms_dev.reg_clear_bits(0x06, (1 << 0))
# RCCAL := TopSPI::RCCAL_LPFCAL
RCCAL = lms_dev.reg_read(0x01) >> 5
if verbosity >= 0: print("RCCAL = %d" % RCCAL)
# RxLPFSPI::RCCAL_LPF := RCCAL
lms_dev.reg_write_bits(0x56, (7 << 4), (RCCAL << 4))
# TxLPFSPI::RCCAL_LPF := RCCAL
lms_dev.reg_write_bits(0x36, (7 << 4), (RCCAL << 4))
# Shut down calibration.
# TopSPI::RST_CAL_LPFCAL := 1 (Rst Active)
lms_dev.reg_set_bits(0x06, (1 << 0))
# Set EN_CAL_LPFCAL := 0 (Block disabled)
lms_dev.reg_clear_bits(0x07, (1 << 7))
# Restore registers 0x05, 0x06 and 0x09
lms_dev.reg_write(0x06, reg_save_06)
lms_dev.reg_write(0x05, reg_save_05)
lms_dev.reg_write(0x09, reg_save_09)
def lms_auto_calibration(lms_dev, ref_clock, lpf_bandwidth_code):
""" Performs all automatic calibration procedures in a recommeded order.
Notes:
    0. Do not forget that you should not apply any data to Tx during
the calibration. Rx should be disconnected as well, but we try
to handle this in the code.
1. It tunes Tx to 320MHz, so you have to re-tune to your frequency
after the calibration.
2. It's better to calibrate with your target TxVGA1 gain. If you
       don't know your target gain yet, choose one <= -7dB to avoid TX mixer
       overload. A TxVGA1 gain of -10dB is a good choice.
3. TxVGA2 gain doesn't impact DC offset or LO leakage, because
it is in RF and is AC coupled. So we don't touch it. Thus TxVGA2
gain is irrelevant for the purpose of this calibration.
4. RxVGA2 gain is irrelevant, because it's set to 30dB during the
calibration and then restored to the original value.
"""
print("LPF Tuning...")
lms_lpf_tuning_dc_calibration(lms_dev)
print("LPF Bandwidth Tuning...")
lms_lpf_bandwidth_tuning(lms_dev, ref_clock, lpf_bandwidth_code)
print("Tx LPF DC calibration...")
lms_txrx_lpf_dc_calibration(lms_dev, True)
# Disable Rx
# We use this way of disabling Rx, because we have to leave
# RXMIX working. If we disable MIX, calibration will not fail,
# but DC cancellation will be a bit worse. And setting LNASEL_RXFE
# to 0 disables RXMIX. So instead of that we select LNA1 and then
# connect it to the internal termination resistor with
# IN1SEL_MIX_RXFE and RINEN_MIX_RXFE configuration bits.
lna = lms_get_rx_lna(lms_dev)
# 1. Select LNA1
lms_set_rx_lna(lms_dev, 1)
# 2. Connect LNA to external inputs.
# IN1SEL_MIX_RXFE: Selects the input to the mixer
reg_save_71 = lms_dev.reg_clear_bits(0x71, (1 << 7))
# 3. Enable internal termination resistor.
# RINEN_MIX_RXFE: Termination resistor on external mixer input enable
reg_save_7C = lms_dev.reg_set_bits(0x7C, (1 << 2))
# Set RxVGA2 gain to max
rx_vga2gain = lms_set_rx_vga2gain(lms_dev, 30)
# Calibrate!
print("Rx LPF DC calibration...")
lms_txrx_lpf_dc_calibration(lms_dev, False)
print("RxVGA2 DC calibration...")
lms_rxvga2_dc_calibration(lms_dev)
# Restore saved values
lms_set_rx_vga2gain(lms_dev, rx_vga2gain)
lms_dev.reg_write(0x71, reg_save_71)
lms_dev.reg_write(0x7C, reg_save_7C)
lms_set_rx_lna(lms_dev, lna)
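# Usage sketch (assumes an open umtrx_lms_dev handle, as created in __main__
# below): run the full recommended sequence with a 26MHz reference clock and
# the default 0.75MHz LPF code, then re-tune the Tx PLL to your own frequency:
#   lms_auto_calibration(umtrx_lms_dev, int(26e6), 0x0f)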
def enable_loopback(lms_dev):
""" Enable loopback"""
    lms_dev.reg_set_bits(0x35, (76))
lms_dev.reg_set_bits(0x64, (28)) # power off RXVGA2
lms_dev.reg_set_bits(0x09, (192)) # RXOUTSW is closed
lms_dev.reg_set_bits(0x46, (1 << 2))
lms_dev.reg_set_bits(0x08, (1 << 4)) # LBEN_OPIN switched on
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = 'UmTRX LMS debugging tool.', epilog = "UmTRX is detected via broadcast unless an explicit address is specified via the --umtrx-addr option. 'None' returned while reading/writing indicates an error in the process.")
parser.add_argument('--version', action='version', version='%(prog)s 3.2')
    parser.add_argument('--lms', type = int, choices = list(range(1, 3)), help = 'LMS number; if no other options are given, it will dump all registers for the corresponding LMS')
    parser.add_argument('--reg', type = lambda s: int(s, 16), choices = range(0, 0x80), metavar = '0..0x7F', help = 'LMS register number, hex')
parser.add_argument('--verify', action = 'store_true', help = 'read back written register value to verify correctness')
parser.add_argument('--pll-ref-clock', type = float, default = 26e6, help = 'PLL reference clock, 26MHz by default')
parser.add_argument('--lpf-bandwidth-code', type = lambda s: int(s, 16), choices = range(0, 0x10), metavar = '0..0x0f', help = 'LPF bandwidth code (default: 0x0f), used only with --lms-lpf-bandwidth-tuning and --lms-auto-calibration')
basic_opt = parser.add_mutually_exclusive_group()
basic_opt.add_argument('--detect', dest = 'bcast_addr', default = '192.168.10.255', help='broadcast domain where UmTRX should be discovered (default: 192.168.10.255)')
basic_opt.add_argument('--umtrx-addr', dest = 'umtrx', const = '192.168.10.2', nargs='?', help = 'UmTRX address (default: 192.168.10.2)')
adv_opt = parser.add_mutually_exclusive_group()
adv_opt.add_argument('--data', type = lambda s: int(s, 16), choices = range(0, 0x100), metavar = '0..0xFF', help = 'data to be written into LMS register, hex')
adv_opt.add_argument('--dump', action = 'store_true', help = 'dump registers')
adv_opt.add_argument('--lms-init', action = 'store_true', help = 'run init sequence for LMS')
adv_opt.add_argument('--lms-tx-enable', type = int, choices = range(0, 2), help = '1 to enable TX chain, 0 to disable TX chain')
adv_opt.add_argument('--lms-rx-enable', type = int, choices = range(0, 2), help = '1 to enable RX chain, 0 to disable RX chain')
adv_opt.add_argument('--lms-auto-calibration', action = 'store_true', help = 'A shorthand for --lms-lpf-tuning-dc-calibration, --lms-tx-lpf-dc-calibration, --lms-rx-lpf-dc-calibration, --lms-rxvga2-dc-calibration and --lms-lpf-bandwidth-tuning.')
adv_opt.add_argument('--lms-lpf-tuning-dc-calibration', action = 'store_true', help = 'DC Offset Calibration of LPF Tuning Module')
adv_opt.add_argument('--lms-tx-lpf-dc-calibration', action = 'store_true', help = 'TX LPF DC Offset Calibration')
adv_opt.add_argument('--lms-rx-lpf-dc-calibration', action = 'store_true', help = 'RX LPF DC Offset Calibration')
adv_opt.add_argument('--lms-rxvga2-dc-calibration', action = 'store_true', help = 'RXVGA2 DC Offset Calibration')
adv_opt.add_argument('--lms-lpf-bandwidth-tuning', action = 'store_true', help = 'LPF bandwidth tuning. Specify --lpf-bandwidth-code to select specific LPF to tune. WARNING: Tx PLL is tuned to 320MHz during this procedure. Don\'t forget to re-tune it back if needed.')
adv_opt.add_argument('--lms-set-tx-pa', type = int, choices = range(0, 3), help = 'Activate selected Tx PA, i.e. select LMS output. 0 to turn off all PAs')
adv_opt.add_argument('--lms-get-tx-pa', action = 'store_true', help = 'Get active PA, i.e. active LMS output. 0 if all outputs are disabled')
adv_opt.add_argument('--lms-set-rx-lna', type = int, choices = range(0, 4), help = 'Activate selected Rx LNA, i.e. select LMS input. 0 to turn off all LNAs')
adv_opt.add_argument('--lms-get-rx-lna', action = 'store_true', help = 'Get active LNA, i.e. active LMS input. 0 if all inputs are disabled')
adv_opt.add_argument('--lms-tx-pll-tune', type = float, metavar = '232.5e6..3720e6', help = 'Tune Tx PLL to the given frequency')
adv_opt.add_argument('--lms-rx-pll-tune', type = float, metavar = '232.5e6..3720e6', help = 'Tune Rx PLL to the given frequency')
adv_opt.add_argument('--lms-set-tx-vga1-gain', type = int, choices = range(-35, -3), metavar = '[-35..-4]', help = 'Set Tx VGA1 gain, in dB')
adv_opt.add_argument('--lms-get-tx-vga1-gain', action = 'store_true', help = 'Get Tx VGA1 gain, in dB')
adv_opt.add_argument('--lms-set-tx-vga2-gain', type = int, choices = range(0, 26), metavar = '[0..25]', help = 'Set Tx VGA2 gain, in dB')
adv_opt.add_argument('--lms-get-tx-vga2-gain', action = 'store_true', help = 'Get Tx VGA2 gain, in dB')
adv_opt.add_argument('--lms-set-rx-vga1-gain-int', type = int, choices = range(0, 121), metavar = '[0..120]', help = 'Set Rx VGA1 gain raw value. 120=30dB, 102=19dB, 2=5dB')
adv_opt.add_argument('--lms-get-rx-vga1-gain-int', action = 'store_true', help = 'Get Rx VGA1 gain raw value')
adv_opt.add_argument('--lms-set-rx-vga1-gain', type = float, metavar = '5.0..30.17', help = 'Set Rx VGA1 gain in dB')
adv_opt.add_argument('--lms-get-rx-vga1-gain', action = 'store_true', help = 'Get Rx VGA1 gain in dB')
adv_opt.add_argument('--lms-set-rx-vga2-gain', type = int, choices = range(0, 61), metavar = '[0..60]', help = 'Set Rx VGA2 gain, in dB with 3dB accuracy.')
adv_opt.add_argument('--lms-get-rx-vga2-gain', action = 'store_true', help = 'Get Rx VGA2 gain, in dB')
    adv_opt.add_argument('--lms-set-vga1-dc-i', type = int, choices = range(0, 256), metavar = '[0..255]', help = 'Set TxVGA1 DC shift, I channel.')
adv_opt.add_argument('--lms-get-vga1-dc-i', action = 'store_true', help = 'Get TxVGA1 DC shift, I channel')
    adv_opt.add_argument('--lms-set-vga1-dc-q', type = int, choices = range(0, 256), metavar = '[0..255]', help = 'Set TxVGA1 DC shift, Q channel.')
    adv_opt.add_argument('--lms-get-vga1-dc-q', action = 'store_true', help = 'Get TxVGA1 DC shift, Q channel')
adv_opt.add_argument('--lms-tune-vga1-dc-i', action = 'store_true', help = 'Interactive tuning of TxVGA1 DC shift, I channel')
adv_opt.add_argument('--lms-tune-vga1-dc-q', action = 'store_true', help = 'Interactive tuning of TxVGA1 DC shift, Q channel')
adv_opt.add_argument('--enable-loopback', action = 'store_true', help = 'enable loopback')
args = parser.parse_args()
    if args.lms is None: # argparse does not have a dependency concept for options
if args.reg is not None or args.data is not None or args.lms_tx_pll_tune is not None \
or args.lms_rx_pll_tune is not None or args.lms_init \
or args.lms_set_tx_pa is not None or args.lms_set_rx_lna is not None \
or args.lms_get_tx_pa or args.lms_get_rx_lna \
or args.lms_lpf_tuning_dc_calibration or args.lms_tx_lpf_dc_calibration \
or args.lms_rx_lpf_dc_calibration or args.lms_rxvga2_dc_calibration \
or args.lms_auto_calibration or args.lms_lpf_bandwidth_tuning \
           or args.lms_tx_enable is not None or args.lms_rx_enable is not None:
exit('--lms parameter is required for given options.') # gengetopt is so much better
if args.data is not None:
if args.reg is None:
exit('<data> argument requires <reg> argument.')
if args.lms_tx_pll_tune is not None:
if not 232.5e6 < args.lms_tx_pll_tune <= 3720e6:
exit('<lms-tx-pll-tune> is out of range 232.5e6..3720e6')
if args.lms_rx_pll_tune is not None:
if not 232.5e6 < args.lms_rx_pll_tune <= 3720e6:
exit('<lms-rx-pll-tune> is out of range 232.5e6..3720e6')
if args.lms_init:
if args.reg is not None:
exit('--reg makes no sense with --lms-init, aborting.')
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(umtrx_ctrl.UDP_TIMEOUT)
umtrx = umtrx_ctrl.detect(sock, args.umtrx if args.umtrx is not None else args.bcast_addr)
if umtrx is not None: # UmTRX address established
if umtrx_ctrl.ping(sock, umtrx): # UmTRX probed
umtrx_lms_dev = umtrx_ctrl.umtrx_lms_device(sock, umtrx, args.lms if args.lms else 1)
if args.lms_init:
lms_init(umtrx_lms_dev)
elif args.lms_tx_enable is not None:
if args.lms_tx_enable == 0:
lms_tx_disable(umtrx_lms_dev)
elif args.lms_tx_enable == 1:
lms_tx_enable(umtrx_lms_dev)
else:
print('Wrong parameter value for --lms-tx-enable')
elif args.lms_rx_enable is not None:
if args.lms_rx_enable == 0:
lms_rx_disable(umtrx_lms_dev)
elif args.lms_rx_enable == 1:
lms_rx_enable(umtrx_lms_dev)
else:
                    print('Wrong parameter value for --lms-rx-enable')
elif args.lms_auto_calibration:
# 0x0f - 0.75MHz
lpf_bw_code = args.lpf_bandwidth_code if args.lpf_bandwidth_code is not None else 0x0f
lms_auto_calibration(umtrx_lms_dev, int(args.pll_ref_clock), int(lpf_bw_code))
elif args.lms_lpf_tuning_dc_calibration:
lms_lpf_tuning_dc_calibration(umtrx_lms_dev)
elif args.lms_tx_lpf_dc_calibration:
lms_txrx_lpf_dc_calibration(umtrx_lms_dev, True)
elif args.lms_rx_lpf_dc_calibration:
lms_txrx_lpf_dc_calibration(umtrx_lms_dev, False)
elif args.lms_rxvga2_dc_calibration:
lms_rxvga2_dc_calibration(umtrx_lms_dev)
elif args.lms_set_tx_pa is not None:
lms_set_tx_pa(umtrx_lms_dev, args.lms_set_tx_pa)
elif args.lms_get_tx_pa:
lms_get_tx_pa(umtrx_lms_dev)
elif args.lms_set_rx_lna is not None:
lms_set_rx_lna(umtrx_lms_dev, args.lms_set_rx_lna)
elif args.lms_get_rx_lna:
lms_get_rx_lna(umtrx_lms_dev)
elif args.lms_tx_pll_tune is not None:
lms_tx_pll_tune(umtrx_lms_dev, int(args.pll_ref_clock), int(args.lms_tx_pll_tune))
elif args.lms_rx_pll_tune is not None:
lms_rx_pll_tune(umtrx_lms_dev, int(args.pll_ref_clock), int(args.lms_rx_pll_tune))
elif args.lms_lpf_bandwidth_tuning:
# 0x0f - 0.75MHz
lpf_bw_code = args.lpf_bandwidth_code if args.lpf_bandwidth_code is not None else 0x0f
lms_lpf_bandwidth_tuning(umtrx_lms_dev, int(args.pll_ref_clock), int(lpf_bw_code))
elif args.lms_set_tx_vga1_gain is not None:
lms_set_tx_vga1gain(umtrx_lms_dev, int(args.lms_set_tx_vga1_gain))
elif args.lms_get_tx_vga1_gain:
gain = lms_get_tx_vga1gain(umtrx_lms_dev)
print(gain)
elif args.lms_set_tx_vga2_gain is not None:
lms_set_tx_vga2gain(umtrx_lms_dev, int(args.lms_set_tx_vga2_gain))
elif args.lms_get_tx_vga2_gain:
gain = lms_get_tx_vga2gain(umtrx_lms_dev)
print(gain)
elif args.lms_set_rx_vga1_gain_int is not None:
lms_set_rx_vga1gain_int(umtrx_lms_dev, int(args.lms_set_rx_vga1_gain_int))
elif args.lms_get_rx_vga1_gain_int:
gain = lms_get_rx_vga1gain_int(umtrx_lms_dev)
print(gain)
elif args.lms_set_rx_vga1_gain is not None:
lms_set_rx_vga1gain(umtrx_lms_dev, args.lms_set_rx_vga1_gain)
elif args.lms_get_rx_vga1_gain:
gain = lms_get_rx_vga1gain(umtrx_lms_dev)
print(gain)
elif args.lms_set_rx_vga2_gain is not None:
lms_set_rx_vga2gain(umtrx_lms_dev, int(args.lms_set_rx_vga2_gain))
elif args.lms_get_rx_vga2_gain:
gain = lms_get_rx_vga2gain(umtrx_lms_dev)
print(gain)
elif args.lms_set_vga1_dc_i is not None:
lms_set_vga1dc_i_int(umtrx_lms_dev, int(args.lms_set_vga1_dc_i))
elif args.lms_get_vga1_dc_i:
dc_offset = lms_get_vga1dc_i_int(umtrx_lms_dev)
print(dc_offset)
elif args.lms_set_vga1_dc_q is not None:
lms_set_vga1dc_q_int(umtrx_lms_dev, int(args.lms_set_vga1_dc_q))
elif args.lms_get_vga1_dc_q:
dc_offset = lms_get_vga1dc_q_int(umtrx_lms_dev)
print(dc_offset)
elif args.lms_tune_vga1_dc_i:
for i in range(110, 130, 1):
print("DC offset %f (%d)" % (float(i-128)/16, i))
lms_set_vga1dc_i_int(umtrx_lms_dev, i)
time.sleep(1)
elif args.lms_tune_vga1_dc_q:
for i in range(103, 150, 1):
print("DC offset %f (%d)" % (float(i-128)/16, i))
lms_set_vga1dc_q_int(umtrx_lms_dev, i)
time.sleep(1)
elif args.data is not None:
wrt = umtrx_lms_dev.reg_write(args.reg, args.data)
if args.verify:
vrfy = umtrx_lms_dev.reg_read(args.reg)
print('written 0x%02X to REG 0x%02X - %s' % (vrfy, args.reg, 'OK' if vrfy == args.data else 'FAIL'))
elif args.reg is not None:
print('read 0x%02X from REG 0x%02X' % (umtrx_lms_dev.reg_read(args.reg), args.reg))
elif args.enable_loopback:
enable_loopback(umtrx_lms_dev)
elif args.lms:
lms_regs = dump(umtrx_lms_dev)
print('LMS %u' % args.lms)
print(''.join('# 0x%02X: 0x%02X\n' % data for data in lms_regs))
elif args.dump:
umtrx_lms_dev_1 = umtrx_ctrl.umtrx_lms_device(sock, umtrx, 1)
umtrx_lms_dev_2 = umtrx_ctrl.umtrx_lms_device(sock, umtrx, 2)
lms1 = dump(umtrx_lms_dev_1)
lms2 = dump(umtrx_lms_dev_2)
diff = list(map(lambda l1, l2: 'OK\n' if l1[1] == l2[1] else 'DIFF\n', lms1, lms2))
print(''.join(map(lambda l1, l2, d: '# 0x%02X: LMS1=0x%02X \tLMS2=0x%02X\t%s' % (l1[0], l1[1], l2[1], d), lms1, lms2, diff)))
else:
print('UmTRX suspected at %s' % umtrx)
else:
print('UmTRX at %s is not responding.' % umtrx)
else:
print('No UmTRX detected over %s' % args.bcast_addr)
|
bsd-3-clause
|
GabrielNicolasAvellaneda/dd-agent
|
agent.py
|
22
|
11230
|
#!/opt/datadog-agent/embedded/bin/python
'''
Datadog
www.datadoghq.com
----
Make sense of your IT Data
Licensed under Simplified BSD License (see LICENSE)
(C) Boxed Ice 2010 all rights reserved
(C) Datadog, Inc. 2010-2014 all rights reserved
'''
# set up logging before importing any other components
from config import get_version, initialize_logging # noqa
initialize_logging('collector')
# stdlib
import logging
import os
import signal
import sys
import time
# For pickle & PID files, see issue 293
os.umask(022)
# project
from checks.check_status import CollectorStatus
from checks.collector import Collector
from config import (
get_config,
get_parsed_args,
get_system_stats,
load_check_directory,
)
from daemon import AgentSupervisor, Daemon
from emitter import http_emitter
from util import (
EC2,
get_hostname,
Watchdog,
)
from utils.flare import configcheck, Flare
from utils.jmx import jmx_command
from utils.pidfile import PidFile
from utils.profile import AgentProfiler
# Constants
PID_NAME = "dd-agent"
WATCHDOG_MULTIPLIER = 10
RESTART_INTERVAL = 4 * 24 * 60 * 60 # Defaults to 4 days
START_COMMANDS = ['start', 'restart', 'foreground']
DD_AGENT_COMMANDS = ['check', 'flare', 'jmx']
DEFAULT_COLLECTOR_PROFILE_INTERVAL = 20
# Globals
log = logging.getLogger('collector')
class Agent(Daemon):
"""
The agent class is a daemon that runs the collector in a background process.
"""
def __init__(self, pidfile, autorestart, start_event=True, in_developer_mode=False):
Daemon.__init__(self, pidfile, autorestart=autorestart)
self.run_forever = True
self.collector = None
self.start_event = start_event
self.in_developer_mode = in_developer_mode
def _handle_sigterm(self, signum, frame):
log.debug("Caught sigterm. Stopping run loop.")
self.run_forever = False
if self.collector:
self.collector.stop()
log.debug("Collector is stopped.")
def _handle_sigusr1(self, signum, frame):
self._handle_sigterm(signum, frame)
self._do_restart()
@classmethod
def info(cls, verbose=None):
logging.getLogger().setLevel(logging.ERROR)
return CollectorStatus.print_latest_status(verbose=verbose)
def run(self, config=None):
"""Main loop of the collector"""
# Gracefully exit on sigterm.
signal.signal(signal.SIGTERM, self._handle_sigterm)
# A SIGUSR1 signals an exit with an autorestart
signal.signal(signal.SIGUSR1, self._handle_sigusr1)
# Handle Keyboard Interrupt
signal.signal(signal.SIGINT, self._handle_sigterm)
# Save the agent start-up stats.
CollectorStatus().persist()
        # Initialize the collector.
if not config:
config = get_config(parse_args=True)
agentConfig = self._set_agent_config_hostname(config)
hostname = get_hostname(agentConfig)
systemStats = get_system_stats()
emitters = self._get_emitters(agentConfig)
# Load the checks.d checks
checksd = load_check_directory(agentConfig, hostname)
self.collector = Collector(agentConfig, emitters, systemStats, hostname)
# In developer mode, the number of runs to be included in a single collector profile
collector_profile_interval = agentConfig.get('collector_profile_interval',
DEFAULT_COLLECTOR_PROFILE_INTERVAL)
# Configure the watchdog.
check_frequency = int(agentConfig['check_freq'])
watchdog = self._get_watchdog(check_frequency, agentConfig)
# Initialize the auto-restarter
self.restart_interval = int(agentConfig.get('restart_interval', RESTART_INTERVAL))
self.agent_start = time.time()
profiled = False
collector_profiled_runs = 0
# Run the main loop.
while self.run_forever:
# Setup profiling if necessary
if self.in_developer_mode and not profiled:
try:
profiler = AgentProfiler()
profiler.enable_profiling()
profiled = True
except Exception as e:
log.warn("Cannot enable profiler: %s" % str(e))
# Do the work.
self.collector.run(checksd=checksd, start_event=self.start_event)
if profiled:
if collector_profiled_runs >= collector_profile_interval:
try:
profiler.disable_profiling()
profiled = False
collector_profiled_runs = 0
except Exception as e:
log.warn("Cannot disable profiler: %s" % str(e))
# Check if we should restart.
if self.autorestart and self._should_restart():
self._do_restart()
# Only plan for the next loop if we will continue,
# otherwise just exit quickly.
if self.run_forever:
if watchdog:
watchdog.reset()
if profiled:
collector_profiled_runs += 1
time.sleep(check_frequency)
# Now clean-up.
try:
CollectorStatus.remove_latest_status()
except Exception:
pass
# Explicitly kill the process, because it might be running
# as a daemon.
log.info("Exiting. Bye bye.")
sys.exit(0)
def _get_emitters(self, agentConfig):
return [http_emitter]
def _get_watchdog(self, check_freq, agentConfig):
watchdog = None
if agentConfig.get("watchdog", True):
watchdog = Watchdog(check_freq * WATCHDOG_MULTIPLIER,
max_mem_mb=agentConfig.get('limit_memory_consumption', None))
watchdog.reset()
return watchdog
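    # Illustrative note (hypothetical numbers): the watchdog above tolerates ten
    # missed collection intervals before firing (WATCHDOG_MULTIPLIER == 10), so
    # e.g. a 15s check_freq would give a 150s watchdog timeout.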
def _set_agent_config_hostname(self, agentConfig):
        # Try to fetch the instance Id from EC2 if no hostname has been set
# in the config file.
# DEPRECATED
if agentConfig.get('hostname') is None and agentConfig.get('use_ec2_instance_id'):
instanceId = EC2.get_instance_id(agentConfig)
if instanceId is not None:
log.info("Running on EC2, instanceId: %s" % instanceId)
agentConfig['hostname'] = instanceId
else:
log.info('Not running on EC2, using hostname to identify this server')
return agentConfig
def _should_restart(self):
if time.time() - self.agent_start > self.restart_interval:
return True
return False
def _do_restart(self):
log.info("Running an auto-restart.")
if self.collector:
self.collector.stop()
sys.exit(AgentSupervisor.RESTART_EXIT_STATUS)
def main():
options, args = get_parsed_args()
agentConfig = get_config(options=options)
autorestart = agentConfig.get('autorestart', False)
hostname = get_hostname(agentConfig)
in_developer_mode = agentConfig.get('developer_mode')
COMMANDS_AGENT = [
'start',
'stop',
'restart',
'status',
'foreground',
]
COMMANDS_NO_AGENT = [
'info',
'check',
'configcheck',
'jmx',
'flare',
]
COMMANDS = COMMANDS_AGENT + COMMANDS_NO_AGENT
if len(args) < 1:
sys.stderr.write("Usage: %s %s\n" % (sys.argv[0], "|".join(COMMANDS)))
return 2
command = args[0]
if command not in COMMANDS:
sys.stderr.write("Unknown command: %s\n" % command)
return 3
# Deprecation notice
if command not in DD_AGENT_COMMANDS:
# Will become an error message and exit after deprecation period
from utils.deprecations import deprecate_old_command_line_tools
deprecate_old_command_line_tools()
if command in COMMANDS_AGENT:
agent = Agent(PidFile('dd-agent').get_path(), autorestart, in_developer_mode=in_developer_mode)
if command in START_COMMANDS:
log.info('Agent version %s' % get_version())
if 'start' == command:
log.info('Start daemon')
agent.start()
elif 'stop' == command:
log.info('Stop daemon')
agent.stop()
elif 'restart' == command:
log.info('Restart daemon')
agent.restart()
elif 'status' == command:
agent.status()
elif 'info' == command:
return Agent.info(verbose=options.verbose)
elif 'foreground' == command:
logging.info('Running in foreground')
if autorestart:
# Set-up the supervisor callbacks and fork it.
logging.info('Running Agent with auto-restart ON')
def child_func():
agent.start(foreground=True)
def parent_func():
agent.start_event = False
AgentSupervisor.start(parent_func, child_func)
else:
# Run in the standard foreground.
agent.start(foreground=True)
elif 'check' == command:
if len(args) < 2:
sys.stderr.write(
"Usage: %s check <check_name> [check_rate]\n"
"Add check_rate as last argument to compute rates\n"
% sys.argv[0]
)
return 1
check_name = args[1]
try:
import checks.collector
# Try the old-style check first
print getattr(checks.collector, check_name)(log).check(agentConfig)
except Exception:
# If not an old-style check, try checks.d
checks = load_check_directory(agentConfig, hostname)
for check in checks['initialized_checks']:
if check.name == check_name:
if in_developer_mode:
check.run = AgentProfiler.wrap_profiling(check.run)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
if len(args) == 3 and args[2] == 'check_rate':
print "Running 2nd iteration to capture rate metrics"
time.sleep(1)
cs = Collector.run_single_check(check, verbose=True)
print CollectorStatus.render_check_status(cs)
check.stop()
elif 'configcheck' == command or 'configtest' == command:
configcheck()
elif 'jmx' == command:
jmx_command(args[1:], agentConfig)
elif 'flare' == command:
Flare.check_user_rights()
case_id = int(args[1]) if len(args) > 1 else None
f = Flare(True, case_id)
f.collect()
try:
f.upload()
except Exception, e:
print 'The upload failed:\n{0}'.format(str(e))
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except StandardError:
# Try our best to log the error.
try:
log.exception("Uncaught error running the Agent")
except Exception:
pass
raise
|
bsd-3-clause
|
Jandersoft/openshift-ansible
|
roles/lib_zabbix/library/zbx_mediatype.py
|
19
|
5963
|
#!/usr/bin/env python
'''
Ansible module for mediatype
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix mediatype ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is in place because the zabbix modules look similar to one another.
# They need duplicate code, as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
''' Check if key exists in content or the size of content[key] > 0
'''
if not content.has_key(key):
return False
if not content[key]:
return False
return True
def get_mtype(mtype):
'''
Transport used by the media type.
Possible values:
0 - email;
1 - script;
2 - SMS;
3 - Jabber;
100 - Ez Texting.
'''
mtype = mtype.lower()
media_type = None
if mtype == 'script':
media_type = 1
elif mtype == 'sms':
media_type = 2
elif mtype == 'jabber':
media_type = 3
    elif mtype == 'ez texting':
        media_type = 100
else:
media_type = 0
return media_type
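# Examples (sketch): get_mtype('SMS') == 2, get_mtype('Jabber') == 3; any
# unrecognized value, including 'email' itself, falls through to 0 (email).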
def main():
'''
Ansible zabbix module for mediatype
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
description=dict(default=None, type='str'),
mtype=dict(default=None, type='str'),
smtp_server=dict(default=None, type='str'),
smtp_helo=dict(default=None, type='str'),
smtp_email=dict(default=None, type='str'),
passwd=dict(default=None, type='str'),
path=dict(default=None, type='str'),
username=dict(default=None, type='str'),
status=dict(default='enabled', type='str'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'mediatype'
idname = "mediatypeid"
description = module.params['description']
state = module.params['state']
content = zapi.get_content(zbx_class_name, 'get', {'search': {'description': description}})
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
if state == 'present':
        status = 1
        if module.params['status'] == 'enabled':
            status = 0
params = {'description': description,
'type': get_mtype(module.params['mtype']),
'smtp_server': module.params['smtp_server'],
'smtp_helo': module.params['smtp_helo'],
'smtp_email': module.params['smtp_email'],
'passwd': module.params['passwd'],
'exec_path': module.params['path'],
'username': module.params['username'],
'status': status,
}
# Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
if content.has_key('error'):
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state='present')
# already exists, we need to update it
# let's compare properties
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if zab_results[key] != value and \
zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences[idname] = zab_results[idname]
content = zapi.get_content(zbx_class_name, 'update', differences)
module.exit_json(changed=True, results=content['result'], state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
apache-2.0
|
hclerdim/cooperative-urbaine
|
vendor/doctrine/orm/docs/en/_exts/configurationblock.py
|
2577
|
3506
|
#Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
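# Usage sketch (hypothetical reST source; the language names must appear in the
# `formats` map above):
#
#   .. configuration-block::
#
#       .. code-block:: yaml
#
#           key: value
#
#       .. code-block:: xml
#
#           <key>value</key>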
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
|
mit
|
bytedance/fedlearner
|
web_console_v2/api/test/fedlearner_webconsole/job/yaml_formatter_test.py
|
1
|
6423
|
# Copyright 2021 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
import tarfile
import base64
from io import BytesIO
from google.protobuf.json_format import ParseDict
from fedlearner_webconsole.job.yaml_formatter import format_yaml, code_dict_encode, generate_self_dict
from fedlearner_webconsole.job.models import Job, JobState
from fedlearner_webconsole.proto.workflow_definition_pb2 import JobDefinition
from testing.common import BaseTestCase
class YamlFormatterTest(BaseTestCase):
def test_format_with_phs(self):
project = {
'variables[0]':
{'storage_root_dir': 'root_dir'}
}
workflow = {
'jobs': {
'raw_data_job': {'name': 'raw_data123'}
}
}
yaml = format_yaml("""
{
"name": "OUTPUT_BASE_DIR",
"value": "${project.variables[0].storage_root_dir}/raw_data/${workflow.jobs.raw_data_job.name}"
}
""", project=project, workflow=workflow)
self.assertEqual(yaml, """
{
"name": "OUTPUT_BASE_DIR",
"value": "root_dir/raw_data/raw_data123"
}
""")
self.assertEqual(format_yaml('$project.variables[0].storage_root_dir',
project=project),
project['variables[0]']['storage_root_dir'])
def test_format_with_no_ph(self):
self.assertEqual(format_yaml('{a: 123, b: 234}'),
'{a: 123, b: 234}')
def test_format_yaml_unknown_ph(self):
x = {
'y': 123
}
with self.assertRaises(RuntimeError) as cm:
format_yaml('$x.y is $i.j.k', x=x)
self.assertEqual(str(cm.exception), 'Unknown placeholder: i.j.k')
with self.assertRaises(RuntimeError) as cm:
format_yaml('$x.y is ${i.j}', x=x)
self.assertEqual(str(cm.exception), 'Unknown placeholder: i.j')
def test_encode_code(self):
test_data = {'test/a.py': 'awefawefawefawefwaef',
'test1/b.py': 'asdfasd',
'c.py': '',
'test/d.py': 'asdf'}
code_base64 = code_dict_encode(test_data)
code_dict = {}
if code_base64.startswith('base64://'):
tar_binary = BytesIO(base64.b64decode(code_base64[9:]))
with tarfile.open(fileobj=tar_binary) as tar:
for file in tar.getmembers():
code_dict[file.name] = str(tar.extractfile(file).read(),
encoding='utf-8')
self.assertEqual(code_dict, test_data)
def test_generate_self_dict(self):
config = {
'variables': [
{
'name': 'namespace',
'value': 'leader'
},
{
'name': 'basic_envs',
'value': '{}'
},
{
'name': 'storage_root_dir',
'value': '/'
},
{
'name': 'EGRESS_URL',
'value': '127.0.0.1:1991'
}
]
}
job = Job(name='aa', project_id=1, workflow_id=1, state=JobState.NEW)
job.set_config(ParseDict(config, JobDefinition()))
self.assertEqual(generate_self_dict(job),
{'id': None, 'name': 'aa',
'job_type': None, 'state': 'NEW', 'config':
{'expert_mode': False,
'variables': [
{
'name': 'namespace',
'value': 'leader',
'access_mode': 'UNSPECIFIED',
'widget_schema': '',
'value_type': 'STRING'},
{
'name': 'basic_envs',
'value': '{}',
'access_mode': 'UNSPECIFIED',
'widget_schema': '',
'value_type': 'STRING'},
{
'name': 'storage_root_dir',
'value': '/',
'access_mode': 'UNSPECIFIED',
'widget_schema': '',
'value_type': 'STRING'},
{
'name': 'EGRESS_URL',
'value': '127.0.0.1:1991',
'access_mode': 'UNSPECIFIED',
'widget_schema': '',
'value_type': 'STRING'}],
'name': '',
'job_type': 'UNSPECIFIED',
'is_federated': False,
'dependencies': [],
'yaml_template': ''},
'is_disabled': None, 'workflow_id': 1, 'project_id': 1, 'flapp_snapshot': None,
'pods_snapshot': None, 'error_message': None, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'pods': [], 'complete_at': None,
'variables': {'namespace': 'leader', 'basic_envs': '{}', 'storage_root_dir': '/',
'EGRESS_URL': '127.0.0.1:1991'}}
)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
emory-libraries/findingaids
|
findingaids/fa/tests/models.py
|
1
|
16453
|
# file findingaids/fa/tests/models.py
#
# Copyright 2012 Emory University Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
from types import ListType
from mock import patch
from django.conf import settings
from django.test import TestCase as DjangoTestCase
from django.test.utils import override_settings
from eulxml.xmlmap import load_xmlobject_from_file, load_xmlobject_from_string
from eulxml.xmlmap.eadmap import EAD_NAMESPACE
from eulexistdb.testutil import TestCase
from findingaids.fa.models import FindingAid, LocalComponent, EadRepository, \
Series, Title, PhysicalDescription
# from findingaids.fa.utils import pages_to_show, ead_lastmodified, \
# collection_lastmodified
## unit tests for model objects in findingaids.fa
exist_fixture_path = path.join(path.dirname(path.abspath(__file__)), 'fixtures')
exist_index_path = path.join(path.dirname(path.abspath(__file__)), '..', '..', 'exist_index.xconf')
class FindingAidTestCase(DjangoTestCase):
# test finding aid model (customization of eulxml xmlmap ead object)
FIXTURES = ['leverette135.xml', # simple finding aid (no series/subseries), origination is a person name
'abbey244.xml', # finding aid with series (no subseries), origination is a corporate name
'raoul548.xml', # finding aid with series & subseries, origination is a family name
'bailey807.xml', # finding aid with series, no origination
'adams465.xml',
'pomerantz890.xml' # finding aid with multiple subareas
]
def setUp(self):
self.findingaid = dict()
for file in self.FIXTURES:
filebase = file.split('.')[0]
self.findingaid[filebase] = load_xmlobject_from_file(path.join(exist_fixture_path,
file), FindingAid)
def test_init(self):
for file, fa in self.findingaid.iteritems():
self.assert_(isinstance(fa, FindingAid))
def test_custom_fields(self):
# list title variants
# NOTE: list_title is now a NodeField; calling __unicode__ explicitly to do a string compare
# - origination, person name
self.assertEqual("Leverette, Fannie Lee.", self.findingaid['leverette135'].list_title.__unicode__())
self.assertEqual("L", self.findingaid['leverette135'].first_letter)
# - origination, corporate name
self.assertEqual("Abbey Theatre.", self.findingaid['abbey244'].list_title.__unicode__())
self.assertEqual("A", self.findingaid['abbey244'].first_letter)
# - origination, family name
self.assertEqual("Raoul family.", self.findingaid['raoul548'].list_title.__unicode__())
self.assertEqual("R", self.findingaid['raoul548'].first_letter)
# - no origination - list title falls back to unit title
self.assertEqual("Bailey and Thurman families papers, circa 1882-1995",
self.findingaid['bailey807'].list_title.__unicode__())
self.assertEqual("B", self.findingaid['bailey807'].first_letter)
# dc_subjects
self.assert_(u'Irish drama--20th century.' in self.findingaid['abbey244'].dc_subjects)
self.assert_(u'Theater--Ireland--20th century.' in self.findingaid['abbey244'].dc_subjects)
self.assert_(u'Dublin (Ireland)' in self.findingaid['abbey244'].dc_subjects)
# dc_contributors
self.assert_(u'Bailey, I. G. (Issac George), 1847-1914.' in self.findingaid['bailey807'].dc_contributors)
self.assert_(u'Bailey, Susie E., d. 1948.' in self.findingaid['bailey807'].dc_contributors)
self.assert_(u'Thurman, Howard, 1900-1981.' in self.findingaid['bailey807'].dc_contributors)
self.assert_(u'Thurman, Sue Bailey.' in self.findingaid['bailey807'].dc_contributors)
def test_has_digital_content(self):
# abbey has a dao, but audience is internal
self.assertFalse(self.findingaid['abbey244'].has_digital_content)
# no dao in adams
self.assertFalse(self.findingaid['adams465'].has_digital_content)
# daos have been added to leverette fixture
self.assertTrue(self.findingaid['leverette135'].has_digital_content)
def test_stored_offsite(self):
self.assertFalse(self.findingaid['abbey244'].stored_offsite)
self.assertTrue(self.findingaid['pomerantz890'].stored_offsite)
def test_series_info(self):
info = self.findingaid['raoul548'].dsc.c[0].series_info()
self.assert_(isinstance(info, ListType))
self.assertEqual("Scope and Content Note", unicode(info[0].head))
self.assertEqual("Arrangement Note", unicode(info[1].head))
# series info problem when scopecontent is missing a <head>; contains use restriction
info = self.findingaid['raoul548'].dsc.c[-1].c[-1].series_info()
self.assert_(isinstance(info, ListType))
self.assert_("contains all materials related to " in
info[0].content[0].__unicode__()) # scopecontent with no head
self.assertEqual("Arrangement Note", unicode(info[1].head))
self.assertEqual("Terms Governing Use and Reproduction", unicode(info[2].head))
self.assertEqual("Restrictions on Access", unicode(info[3].head))
def test_series_displaylabel(self):
self.assertEqual("Series 1: Letters and personal papers, 1865-1982",
self.findingaid['raoul548'].dsc.c[0].display_label())
# no unitid
self.assertEqual("Financial and legal papers, 1890-1970",
self.findingaid['raoul548'].dsc.c[2].display_label())
def test_dc_fields(self):
fields = self.findingaid['abbey244'].dc_fields()
self.assert_("Abbey Theatre collection, 1921-1995" in [title.__unicode__() for title in fields["title"]])
self.assert_("Abbey Theatre." in fields["creator"])
self.assert_("Emory University" in fields["publisher"])
self.assert_("2002-02-24" in fields["date"])
self.assert_("eng" in fields["language"])
self.assert_("Irish drama--20th century." in fields["subject"])
self.assert_("Theater--Ireland--20th century." in fields["subject"])
self.assert_("Dublin (Ireland)" in fields["subject"])
self.assert_("http://pidtest.library.emory.edu/ark:/25593/1fx" in fields["identifier"])
fields = self.findingaid['bailey807'].dc_fields()
self.assert_("Bailey, I. G. (Issac George), 1847-1914." in fields["contributor"])
self.assert_("Bailey, Susie E., d. 1948." in fields["contributor"])
self.assert_("Thurman, Howard, 1900-1981." in fields["contributor"])
self.assert_("Thurman, Sue Bailey." in fields["contributor"])
def test_local_component(self):
# local component with custom property - first_file_item
self.assert_(isinstance(self.findingaid['abbey244'].dsc.c[0], LocalComponent))
self.assert_(isinstance(self.findingaid['abbey244'].dsc.c[0].c[0], LocalComponent))
# abbey244 series 1 - no section headings, first c should be first file
self.assertTrue(self.findingaid['abbey244'].dsc.c[0].c[0].first_file_item)
self.assertFalse(self.findingaid['abbey244'].dsc.c[0].c[1].first_file_item)
self.assertFalse(self.findingaid['abbey244'].dsc.c[0].c[-1].first_file_item)
# raoul548 series 1.1 - first item is a section, second item should be first file
self.assertFalse(self.findingaid['raoul548'].dsc.c[0].c[0].c[0].first_file_item)
self.assertTrue(self.findingaid['raoul548'].dsc.c[0].c[0].c[1].first_file_item)
def test_absolute_eadxml_url(self):
# test against current site domain
url = self.findingaid['abbey244'].absolute_eadxml_url()
self.assert_(self.findingaid['abbey244'].eadid.value in url,
'URL should contain the EAD ID for this current document.')
@override_settings(REQUEST_MATERIALS_URL='http://example.com')
def test_requestable(self):
fa = self.findingaid['abbey244']
fa2 = self.findingaid['bailey807']
fa3 = self.findingaid['pomerantz890'] # EAD with multiple subareas
with override_settings(REQUEST_MATERIALS_REPOS = [
'Manuscript, Archives, and Rare Book Library',
'Emory University Archives'
]):
self.assertTrue(fa.requestable(),"EAD from Marbl should be able to be requested.")
# Fail if the REQUEST_MATERIALS_URL is empty
with override_settings(REQUEST_MATERIALS_URL = ''):
self.assertFalse(fa.requestable(),"Cannot request EAD if the REQUEST_MATERIALS_URL is not set.")
# Fail if the REQUEST_MATERIALS_REPOS is empty
with override_settings(REQUEST_MATERIALS_REPOS = ''):
self.assertFalse(fa.requestable(),"Cannot request EAD if the REQUEST_MATERIALS_REPOS is not set.")
# Fail if the requested EAD repo is not set in REQUEST_MATERIALS_REPOS
with override_settings(REQUEST_MATERIALS_REPOS = [
'Manuscript, Archives, and Rare Book Library'
]):
self.assertFalse(fa2.requestable(),"EAD from University Archives (not set) shouldn't be able to be requested.")
# Multiple subareas per one EAD
with override_settings(REQUEST_MATERIALS_REPOS = [
'Pitts Theology Library'
]):
self.assertTrue(fa3.requestable(),"Even if there are multiple subareas, an EAD from the set repos should be able to be requested.")
@override_settings(REQUEST_MATERIALS_URL='http://example.com')
def test_request_materials_url(self):
fa = self.findingaid['abbey244']
self.assert_(fa.request_materials_url())
del settings.REQUEST_MATERIALS_URL
self.assertFalse(fa.request_materials_url(),'Cannot return a request materials url if the setting is None')
class EadRepositoryTestCase(TestCase):
exist_fixtures = {'files': [path.join(exist_fixture_path, 'pomerantz890.xml')] }
def test_distinct(self):
repos = EadRepository.distinct()
# should be a distinct, space-normalized list of subareas
self.assert_('Pitts Theology Library' in repos)
self.assert_('Manuscript, Archives, and Rare Book Library' in repos)
class SeriesTestCase(DjangoTestCase):
# plain file item with no semantic tags
c1 = load_xmlobject_from_string('''<c02 xmlns="%s" level="file">
<did>
<container type="box">1</container>
<container type="folder">1</container>
<unittitle>Acey, J. Earl and Port Scott, July 10, 1991. [Cassette
available]</unittitle>
</did>
</c02>''' % EAD_NAMESPACE, Series)
# simple tagged person name in the unittitle
c2 = load_xmlobject_from_string('''<c02 xmlns="%s" level="file">
<did>
<container type="box">1</container>
<container type="folder">1</container>
<unittitle><persname>Acey, J. Earl</persname> and Port Scott, July 10, 1991. [Cassette
available]</unittitle>
</did>
</c02>''' % EAD_NAMESPACE, Series)
# tagged title with source & authfilenumber
c3 = load_xmlobject_from_string('''<c02 xmlns="%s" level="file">
<did>
<container type="box">10</container>
<container type="folder">24</container>
<unittitle>
<title type="scripts" source="OCLC" authfilenumber="434083314">Bayou Legend</title>, notes</unittitle>
</did>
</c02>''' % EAD_NAMESPACE, Series)
# issn title
c4 = load_xmlobject_from_string('''<c02 xmlns="%s" level="file">
<did>
<container type="box">19</container>
<container type="folder">3</container>
<unittitle><title render="doublequote" type="article">Who Has Seen the Wind?</title> <title source="ISSN" authfilenumber="2163-6206">New York Amsterdam News</title>, National Scene Magazine Supplement, November-December 1976</unittitle>
</did></c02>''' % EAD_NAMESPACE, Series)
c5 = load_xmlobject_from_string('''<c02 xmlns="%s" level="file">
<did>
<container type="box">60</container>
<container type="folder">3</container>
<unittitle>
<persname authfilenumber="109557338" role="dc:creator" source="viaf">Heaney, Seamus</persname>,
<date normal="1965-04-27">April 27, 1965</date>:
<title render="doublequote">Boy Driving his Father to Confession</title>,
<title render="doublequote">To A Wine Jar</title>,
<title render="doublequote">On Hogarth's Engraving 'Pit Ticket for the Royal Sport'</title>
</unittitle>
</did>
</c02>''' % EAD_NAMESPACE, Series)
def test_has_semantic_data(self):
self.assertFalse(self.c1.has_semantic_data)
self.assertTrue(self.c2.has_semantic_data)
self.assertTrue(self.c3.has_semantic_data)
self.assertTrue(self.c4.has_semantic_data)
self.assertTrue(self.c5.has_semantic_data)
def test_rdf_type(self):
# not enough information to determine type
self.assertEqual(None, self.c1.rdf_type)
# infer book, article, etc from title attributes
self.assertEqual('bibo:Book', self.c3.rdf_type)
self.assertEqual('bibo:Article', self.c4.rdf_type)
# type inferred based on series; requires access to series, so load from fixtures
# - bailey findingaid contains printed material, photographs, and audiovisual
bailey = load_xmlobject_from_file(path.join(exist_fixture_path, 'bailey807.xml'),
FindingAid)
# patch in unittitles so it looks as though items have semantic data
with patch('findingaids.fa.models.Series.unittitle_titles', new=[Title()]):
# series 4 is printed material
self.assertEqual('bibo:Document', bailey.dsc.c[3].c[0].rdf_type,
'items in printed materials series should default to document type')
# series 5 is photographs
self.assertEqual('bibo:Image', bailey.dsc.c[4].c[0].rdf_type,
'items in photograph series should default to image type')
# series 9 is audiovisual
self.assertEqual('bibo:AudioVisualDocument', bailey.dsc.c[8].c[0].rdf_type,
'items in audiovisual series should default to audiovisualdocument type')
# fallback type is manuscript
        self.assertEqual('bibo:Manuscript', bailey.dsc.c[0].c[0].rdf_type,
            'items in other series should fall back to manuscript type')
class PhysicalDescriptionTestCase(DjangoTestCase):
# multiple extents with separating text
physdesc1 = load_xmlobject_from_string('''<physdesc xmlns="%s" encodinganalog="300"><extent>5.25 linear ft.</extent>
<extent> (7 boxes)</extent>,
<extent>2 bound volumes (BV)</extent>, and
<extent>11 oversized papers (OP)</extent></physdesc>''' % EAD_NAMESPACE, PhysicalDescription)
# extents with only space, no punctuation
physdesc2 = load_xmlobject_from_string('''<physdesc xmlns="%s" encodinganalog="300"><extent>4 linear ft.</extent>
<extent>(8 boxes)</extent></physdesc>''' % EAD_NAMESPACE, PhysicalDescription)
# extents with no space, no punctuation
physdesc3 = load_xmlobject_from_string('''<physdesc xmlns="%s" encodinganalog="300"><extent>4 linear ft.</extent><extent>(8 boxes)</extent></physdesc>''' % EAD_NAMESPACE, PhysicalDescription)
def test_unicode(self):
self.assertEqual(u'5.25 linear ft. (7 boxes), 2 bound volumes (BV), and 11 oversized papers (OP)',
unicode(self.physdesc1))
# should have a space between extents, whether or not it is present in the xml
self.assertEqual(u'4 linear ft. (8 boxes)', unicode(self.physdesc2))
self.assertEqual(u'4 linear ft. (8 boxes)', unicode(self.physdesc3))
|
apache-2.0
|
koldunovn/folium
|
docs/conf.py
|
9
|
7719
|
# -*- coding: utf-8 -*-
#
# Folium documentation build configuration file, created by
# sphinx-quickstart on Sun May 19 19:39:34 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Folium'
copyright = '2013, Rob Story'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.2'
# The full version, including alpha/beta/rc tags.
release = '0.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'f6'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Foliumdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Folium.tex', 'Folium Documentation',
'Rob Story', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'folium', 'Folium Documentation',
['Rob Story'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Folium', 'Folium Documentation',
'Rob Story', 'Folium', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
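# With this configuration in docs/, the HTML docs are typically built with
# (assuming Sphinx is installed; the exact paths here are an assumption):
#
#   sphinx-build -b html docs docs/_build/html
#
# or `make html` from docs/ if the project ships the standard Makefile.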
|
mit
|
andrewleech/SickRage
|
lib/requests/packages/chardet/langgreekmodel.py
|
2763
|
12628
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences: 1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
Latin7GreekModel = {
'charToOrderMap': Latin7_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-7"
}
Win1253GreekModel = {
'charToOrderMap': win1253_CharToOrderMap,
'precedenceMatrix': GreekLangModel,
'mTypicalPositiveRatio': 0.982851,
'keepEnglishLetter': False,
'charsetName': "windows-1253"
}
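# A minimal sketch (NOT the actual chardet prober) of how a single-byte
# charset prober consumes these tables: each byte is mapped to a frequency
# order via the charToOrderMap, and consecutive pairs of orders are scored
# against the flattened 64x64 precedence matrix (0 = negative .. 3 = very
# likely). All names below except the two tables are hypothetical.
def _sketch_sequence_score(byte_values, char_to_order=Latin7_CharToOrderMap,
                           model=GreekLangModel, sample_size=64):
    last_order = 255  # sentinel: no previous sampled character
    total = positive = 0
    for b in byte_values:
        order = char_to_order[b]
        if order < sample_size and last_order < sample_size:
            total += 1
            if model[last_order * sample_size + order] == 3:
                positive += 1
        last_order = order
    return float(positive) / total if total else 0.0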
# flake8: noqa
|
gpl-3.0
|
lordmuffin/aws-cfn-plex
|
functions/credstash/pip/_vendor/requests/packages/urllib3/util/connection.py
|
365
|
4744
|
from __future__ import absolute_import
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
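# Hypothetical helper (not part of urllib3) illustrating the intended use of
# is_connection_dropped(): discard a stale pooled connection before reuse.
def _example_recycle_connection(conn):
    if is_connection_dropped(conn):
        conn.close()
        return None  # caller should open a fresh connection instead
    return conn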
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. The only change to its signature is the added
# `socket_options` parameter. It is also modified to avoid binding to
# IPv6 servers discovered in DNS if the system lacks IPv6 support.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
# Using the value from allowed_gai_family() in the context of getaddrinfo lets
# us select whether to work with IPv4 DNS records, IPv6 records, or both.
# The original create_connection function always returns all records.
family = allowed_gai_family()
for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
"""This function is designed to work in the context of
getaddrinfo, where family=socket.AF_UNSPEC is the default and
will perform a DNS search for both IPv6 and IPv4 records."""
family = socket.AF_INET
if HAS_IPV6:
family = socket.AF_UNSPEC
return family
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6('::1')
|
mit
|
willzhang05/postgrestesting1
|
postgrestesting1/lib/python3.5/site-packages/gunicorn/selectors.py
|
107
|
18997
|
"""Selectors module.
This module allows high-level and efficient I/O multiplexing, built upon the
`select` module primitives.
The following code is adapted from trollius.selectors.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple, Mapping
import math
import select
import sys
from gunicorn._compat import wrap_error, InterruptedError
from gunicorn import six
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, six.integer_types):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
"{0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
"""Object used to associate a file object to its backing file descriptor,
selected event mask and attached data."""
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(six.with_metaclass(ABCMeta)):
"""Selector abstract base class.
A selector supports registering file objects to be monitored for specific
I/O events.
A file object is a file descriptor or any object with a `fileno()` method.
An arbitrary object can be attached to the file object, which can be used
for example to store context information, a callback, etc.
A selector can use various implementations (select(), poll(), epoll()...)
depending on the platform. The default `Selector` class uses the most
efficient implementation on the current platform.
"""
@abstractmethod
def register(self, fileobj, events, data=None):
"""Register a file object.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
ValueError if events is invalid
KeyError if fileobj is already registered
OSError if fileobj is closed or otherwise is unacceptable to
the underlying system call (if a system call is made)
Note:
OSError may or may not be raised
"""
raise NotImplementedError
@abstractmethod
def unregister(self, fileobj):
"""Unregister a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
SelectorKey instance
Raises:
KeyError if fileobj is not registered
Note:
If fileobj is registered but has since been closed this does
*not* raise OSError (even if the wrapped syscall does)
"""
raise NotImplementedError
def modify(self, fileobj, events, data=None):
"""Change a registered file object monitored events or attached data.
Parameters:
fileobj -- file object or file descriptor
events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
data -- attached data
Returns:
SelectorKey instance
Raises:
Anything that unregister() or register() raises
"""
self.unregister(fileobj)
return self.register(fileobj, events, data)
@abstractmethod
def select(self, timeout=None):
"""Perform the actual selection, until some monitored file objects are
ready or a timeout expires.
Parameters:
timeout -- if timeout > 0, this specifies the maximum wait time, in
seconds
if timeout <= 0, the select() call won't block, and will
report the currently ready file objects
if timeout is None, select() will block until a monitored
file object becomes ready
Returns:
list of (key, events) for ready file objects
`events` is a bitwise mask of EVENT_READ|EVENT_WRITE
"""
raise NotImplementedError
def close(self):
"""Close the selector.
This must be called to make sure that any underlying resource is freed.
"""
pass
def get_key(self, fileobj):
"""Return the key associated to a registered file object.
Returns:
SelectorKey for this file object
"""
mapping = self.get_map()
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
@abstractmethod
def get_map(self):
"""Return a mapping of file objects to selector keys."""
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
class _BaseSelectorImpl(BaseSelector):
"""Base selector implementation."""
def __init__(self):
# this maps file descriptors to keys
self._fd_to_key = {}
# read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
"""Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive search in case
the object is invalid but we still have it in our map. This
is used by unregister() so we can unregister an object that
was previously registered even if it is closed. It is also
used by _SelectorMapping.
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Do an exhaustive search.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
# TODO: Subclasses can probably optimize this even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def close(self):
self._fd_to_key.clear()
def get_map(self):
return self._map
def _key_from_fd(self, fd):
"""Return the key associated to a given file descriptor.
Parameters:
fd -- file descriptor
Returns:
corresponding key, or None if not found
"""
try:
return self._fd_to_key[fd]
except KeyError:
return None
class SelectSelector(_BaseSelectorImpl):
"""Select-based selector."""
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
if sys.platform == 'win32':
def _select(self, r, w, _, timeout=None):
r, w, x = select.select(r, w, w, timeout)
return r, w + x, []
else:
_select = select.select
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
ready = []
try:
r, w, _ = wrap_error(self._select,
self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'poll'):
class PollSelector(_BaseSelectorImpl):
"""Poll-based selector."""
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._poll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = int(math.ceil(timeout * 1e3))
ready = []
try:
fd_event_list = wrap_error(self._poll.poll, timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, 'epoll'):
class EpollSelector(_BaseSelectorImpl):
"""Epoll-based selector."""
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
epoll_events = 0
if events & EVENT_READ:
epoll_events |= select.EPOLLIN
if events & EVENT_WRITE:
epoll_events |= select.EPOLLOUT
self._epoll.register(key.fd, epoll_events)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
self._epoll.unregister(key.fd)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
return key
def select(self, timeout=None):
if timeout is None:
timeout = -1
elif timeout <= 0:
timeout = 0
else:
# epoll_wait() has a resolution of 1 millisecond, round away
# from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3) * 1e-3
max_ev = len(self._fd_to_key)
ready = []
try:
fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.EPOLLIN:
events |= EVENT_WRITE
if event & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
if hasattr(select, 'devpoll'):
class DevpollSelector(_BaseSelectorImpl):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def select(self, timeout=None):
if timeout is None:
timeout = None
elif timeout <= 0:
timeout = 0
else:
# devpoll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
ready = []
try:
fd_event_list = self._devpoll.poll(timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
events = 0
if event & ~select.POLLIN:
events |= EVENT_WRITE
if event & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
if hasattr(select, 'kqueue'):
class KqueueSelector(_BaseSelectorImpl):
"""Kqueue-based selector."""
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
if events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
self._kqueue.control([kev], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# This can happen if the FD was closed since it
# was registered.
pass
if key.events & EVENT_WRITE:
kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
self._kqueue.control([kev], 0, 0)
except OSError:
# See comment above.
pass
return key
def select(self, timeout=None):
timeout = None if timeout is None else max(timeout, 0)
max_ev = len(self._fd_to_key)
ready = []
try:
kev_list = wrap_error(self._kqueue.control,
None, max_ev, timeout)
except InterruptedError:
return ready
for kev in kev_list:
fd = kev.ident
flag = kev.filter
events = 0
if flag == select.KQ_FILTER_READ:
events |= EVENT_READ
if flag == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():
DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals():
DefaultSelector = EpollSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'PollSelector' in globals():
DefaultSelector = PollSelector
else:
DefaultSelector = SelectSelector
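# A minimal usage sketch (hypothetical, not part of gunicorn): wait for
# `sock` to become readable with the best available selector, then echo one
# chunk back. EVENT_READ/EVENT_WRITE are the masks defined above.
def _example_echo_once(sock):
    with DefaultSelector() as selector:
        selector.register(sock, EVENT_READ, data='client')
        for key, events in selector.select(timeout=1.0):
            if events & EVENT_READ:
                chunk = key.fileobj.recv(4096)
                if chunk:
                    key.fileobj.sendall(chunk)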
|
mit
|
abridgett/boto
|
tests/integration/redshift/test_layer1.py
|
133
|
5384
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import unittest
import time
from nose.plugins.attrib import attr
from boto.redshift.layer1 import RedshiftConnection
from boto.redshift.exceptions import ClusterNotFoundFault
from boto.redshift.exceptions import ResizeNotFoundFault
class TestRedshiftLayer1Management(unittest.TestCase):
redshift = True
def setUp(self):
self.api = RedshiftConnection()
self.cluster_prefix = 'boto-redshift-cluster-%s'
self.node_type = 'dw.hs1.xlarge'
self.master_username = 'mrtest'
self.master_password = 'P4ssword'
self.db_name = 'simon'
# Redshift was taking ~20 minutes to bring clusters up in testing.
self.wait_time = 60 * 20
def cluster_id(self):
        # This needs to be unique per test method.
return self.cluster_prefix % str(int(time.time()))
def create_cluster(self):
cluster_id = self.cluster_id()
self.api.create_cluster(
cluster_id, self.node_type,
self.master_username, self.master_password,
db_name=self.db_name, number_of_nodes=3
)
# Wait for it to come up.
time.sleep(self.wait_time)
self.addCleanup(self.delete_cluster_the_slow_way, cluster_id)
return cluster_id
def delete_cluster_the_slow_way(self, cluster_id):
# Because there might be other operations in progress. :(
time.sleep(self.wait_time)
self.api.delete_cluster(cluster_id, skip_final_cluster_snapshot=True)
@attr('notdefault')
def test_create_delete_cluster(self):
cluster_id = self.cluster_id()
self.api.create_cluster(
cluster_id, self.node_type,
self.master_username, self.master_password,
db_name=self.db_name, number_of_nodes=3
)
# Wait for it to come up.
time.sleep(self.wait_time)
self.api.delete_cluster(cluster_id, skip_final_cluster_snapshot=True)
@attr('notdefault')
def test_as_much_as_possible_before_teardown(self):
        # Per @garnaat, for the sake of suite time, we'll test as much as we
        # can before we tear down.
# Test a non-existent cluster ID.
with self.assertRaises(ClusterNotFoundFault):
self.api.describe_clusters('badpipelineid')
# Now create the cluster & move on.
cluster_id = self.create_cluster()
# Test never resized.
with self.assertRaises(ResizeNotFoundFault):
self.api.describe_resize(cluster_id)
# The cluster shows up in describe_clusters
clusters = self.api.describe_clusters()['DescribeClustersResponse']\
['DescribeClustersResult']\
['Clusters']
cluster_ids = [c['ClusterIdentifier'] for c in clusters]
self.assertIn(cluster_id, cluster_ids)
# The cluster shows up in describe_clusters w/ id
response = self.api.describe_clusters(cluster_id)
self.assertEqual(response['DescribeClustersResponse']\
['DescribeClustersResult']['Clusters'][0]\
['ClusterIdentifier'], cluster_id)
snapshot_id = "snap-%s" % cluster_id
# Test creating a snapshot.
response = self.api.create_cluster_snapshot(snapshot_id, cluster_id)
self.assertEqual(response['CreateClusterSnapshotResponse']\
['CreateClusterSnapshotResult']['Snapshot']\
['SnapshotIdentifier'], snapshot_id)
self.assertEqual(response['CreateClusterSnapshotResponse']\
['CreateClusterSnapshotResult']['Snapshot']\
['Status'], 'creating')
self.addCleanup(self.api.delete_cluster_snapshot, snapshot_id)
# More waiting. :(
time.sleep(self.wait_time)
# Describe the snapshots.
response = self.api.describe_cluster_snapshots(
cluster_identifier=cluster_id
)
snap = response['DescribeClusterSnapshotsResponse']\
['DescribeClusterSnapshotsResult']['Snapshots'][-1]
self.assertEqual(snap['SnapshotType'], 'manual')
self.assertEqual(snap['DBName'], self.db_name)
|
mit
|
kinnou02/navitia
|
source/sql/alembic/versions/538bc4ea9cd1_multiple_comments.py
|
4
|
2894
|
"""multiple comments
Remove all comment columns and add 2 new tables:
* one comment table
* one table to make the link between pt objects and the comments
Revision ID: 538bc4ea9cd1
Revises: 2c510fae878d
Create Date: 2015-05-05 11:03:45.982893
"""
# revision identifiers, used by Alembic.
revision = '538bc4ea9cd1'
down_revision = '2c510fae878d'
from alembic import op
import sqlalchemy as sa
import geoalchemy2 as ga
def upgrade():
op.create_table(
'comments',
sa.Column('id', sa.BIGINT(), nullable=False),
sa.Column('comment', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint('id'),
schema='navitia',
)
op.create_table(
'ptobject_comments',
sa.Column('id', sa.BIGINT(), nullable=False),
sa.Column('object_type', sa.TEXT(), nullable=False),
sa.Column('object_id', sa.BIGINT(), nullable=False),
sa.Column('comment_id', sa.BIGINT(), nullable=False),
sa.ForeignKeyConstraint(
['comment_id'], [u'navitia.comments.id'], name=u'ptobject_comments_comment_id_fkey'
),
sa.PrimaryKeyConstraint('id'),
schema='navitia',
)
op.drop_column('company', 'comment', schema='navitia')
op.drop_column('journey_pattern', 'comment', schema='navitia')
op.drop_column('journey_pattern_point', 'comment', schema='navitia')
op.drop_column('line', 'comment', schema='navitia')
op.drop_column('network', 'comment', schema='navitia')
op.drop_column('route', 'comment', schema='navitia')
op.drop_column('stop_area', 'comment', schema='navitia')
op.drop_column('stop_point', 'comment', schema='navitia')
op.drop_column('stop_time', 'comment', schema='navitia')
op.drop_column('vehicle_journey', 'comment', schema='navitia')
def downgrade():
op.add_column('vehicle_journey', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('stop_time', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('stop_point', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('stop_area', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('route', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('network', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('line', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('journey_pattern_point', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('journey_pattern', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.add_column('company', sa.Column('comment', sa.TEXT(), nullable=True), schema='navitia')
op.drop_table('ptobject_comments', schema='navitia')
op.drop_table('comments', schema='navitia')
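# Applying or reverting this revision uses the standard Alembic CLI
# (assuming an alembic.ini configured for the navitia database):
#
#   alembic upgrade 538bc4ea9cd1    # create the comments tables, drop columns
#   alembic downgrade 2c510fae878d  # restore the per-table comment columns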
|
agpl-3.0
|
currychou/1
|
static/Brython3.1.3-20150514-095342/Lib/bisect.py
|
1261
|
2595
|
"""Bisection algorithms."""
def insort_right(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the right of the rightmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
a.insert(lo, x)
insort = insort_right # backward compatibility
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
bisect = bisect_right # backward compatibility
def insort_left(a, x, lo=0, hi=None):
"""Insert item x in list a, and keep it sorted assuming a is sorted.
If x is already in a, insert it to the left of the leftmost x.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
a.insert(lo, x)
def bisect_left(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, a.insert(x) will
insert just before the leftmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
# Overwrite above definitions with a fast C implementation
try:
from _bisect import *
except ImportError:
pass
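# A small usage sketch of the API above (hypothetical example values):
def _example_sorted_insert():
    grades = [60, 70, 80, 90]
    assert bisect_right(grades, 70) == 2  # insertion point after equal items
    assert bisect_left(grades, 70) == 1   # insertion point before equal items
    insort(grades, 75)                    # keeps the list sorted in place
    assert grades == [60, 70, 75, 80, 90]
    return grades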
|
gpl-3.0
|
trdean/grEME
|
gr-blocks/python/blocks/qa_ctrlport_probes.py
|
8
|
7371
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import Ice
import sys, time, random, numpy
from gnuradio import gr, gr_unittest, blocks
from gnuradio.ctrlport import GNURadio
from gnuradio import ctrlport
import os, struct
class test_ctrlport_probes(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
os.environ['GR_CONF_CONTROLPORT_ON'] = 'True'
def tearDown(self):
self.tb = None
def test_001(self):
data = range(1,9)
self.src = blocks.vector_source_c(data, True)
self.probe = blocks.ctrlport_probe2_c("samples","Complex",
len(data), gr.DISPNULL)
probe_name = self.probe.alias()
self.tb.connect(self.src, self.probe)
self.tb.start()
# Probes return complex values as list of floats with re, im
# Imaginary parts of this data set are 0.
expected_result = [1, 0, 2, 0, 3, 0, 4, 0,
5, 0, 6, 0, 7, 0, 8, 0]
# Make sure we have time for flowgraph to run
time.sleep(0.1)
# Get available endpoint
ep = gr.rpcmanager_get().endpoints()[0]
# Initialize a simple Ice client from endpoint
ic = Ice.initialize(sys.argv)
base = ic.stringToProxy(ep)
radio = GNURadio.ControlPortPrx.checkedCast(base)
# Get all exported knobs
ret = radio.get([probe_name + "::samples"])
for name in ret.keys():
# Get data in probe, which might be offset; find the
# beginning and unwrap.
result = ret[name].value
i = result.index(1.0)
result = result[i:] + result[0:i]
self.assertEqual(expected_result, result)
self.tb.stop()
def test_002(self):
data = range(1,9)
self.src = blocks.vector_source_f(data, True)
self.probe = blocks.ctrlport_probe2_f("samples","Floats",
len(data), gr.DISPNULL)
probe_name = self.probe.alias()
self.tb.connect(self.src, self.probe)
self.tb.start()
expected_result = [1, 2, 3, 4, 5, 6, 7, 8,]
# Make sure we have time for flowgraph to run
time.sleep(0.1)
# Get available endpoint
ep = gr.rpcmanager_get().endpoints()[0]
# Initialize a simple Ice client from endpoint
ic = Ice.initialize(sys.argv)
base = ic.stringToProxy(ep)
radio = GNURadio.ControlPortPrx.checkedCast(base)
# Get all exported knobs
ret = radio.get([probe_name + "::samples"])
for name in ret.keys():
# Get data in probe, which might be offset; find the
# beginning and unwrap.
result = ret[name].value
i = result.index(1.0)
result = result[i:] + result[0:i]
self.assertEqual(expected_result, result)
self.tb.stop()
def test_003(self):
data = range(1,9)
self.src = blocks.vector_source_i(data, True)
self.probe = blocks.ctrlport_probe2_i("samples","Integers",
len(data), gr.DISPNULL)
probe_name = self.probe.alias()
self.tb.connect(self.src, self.probe)
self.tb.start()
expected_result = [1, 2, 3, 4, 5, 6, 7, 8,]
# Make sure we have time for flowgraph to run
time.sleep(0.1)
# Get available endpoint
ep = gr.rpcmanager_get().endpoints()[0]
# Initialize a simple Ice client from endpoint
ic = Ice.initialize(sys.argv)
base = ic.stringToProxy(ep)
radio = GNURadio.ControlPortPrx.checkedCast(base)
# Get all exported knobs
ret = radio.get([probe_name + "::samples"])
for name in ret.keys():
# Get data in probe, which might be offset; find the
# beginning and unwrap.
result = ret[name].value
i = result.index(1.0)
result = result[i:] + result[0:i]
self.assertEqual(expected_result, result)
self.tb.stop()
def test_004(self):
data = range(1,9)
self.src = blocks.vector_source_s(data, True)
self.probe = blocks.ctrlport_probe2_s("samples","Shorts",
len(data), gr.DISPNULL)
probe_name = self.probe.alias()
self.tb.connect(self.src, self.probe)
self.tb.start()
expected_result = [1, 2, 3, 4, 5, 6, 7, 8,]
# Make sure we have time for flowgraph to run
time.sleep(0.1)
# Get available endpoint
ep = gr.rpcmanager_get().endpoints()[0]
# Initialize a simple Ice client from endpoint
ic = Ice.initialize(sys.argv)
base = ic.stringToProxy(ep)
radio = GNURadio.ControlPortPrx.checkedCast(base)
# Get all exported knobs
ret = radio.get([probe_name + "::samples"])
for name in ret.keys():
# Get data in probe, which might be offset; find the
# beginning and unwrap.
result = ret[name].value
i = result.index(1.0)
result = result[i:] + result[0:i]
self.assertEqual(expected_result, result)
self.tb.stop()
def test_005(self):
data = range(1,9)
self.src = blocks.vector_source_b(data, True)
self.probe = blocks.ctrlport_probe2_b("samples","Bytes",
len(data), gr.DISPNULL)
probe_name = self.probe.alias()
self.tb.connect(self.src, self.probe)
self.tb.start()
expected_result = [1, 2, 3, 4, 5, 6, 7, 8,]
# Make sure we have time for flowgraph to run
time.sleep(0.1)
# Get available endpoint
ep = gr.rpcmanager_get().endpoints()[0]
# Initialize a simple Ice client from endpoint
ic = Ice.initialize(sys.argv)
base = ic.stringToProxy(ep)
radio = GNURadio.ControlPortPrx.checkedCast(base)
# Get all exported knobs
ret = radio.get([probe_name + "::samples"])
for name in ret.keys():
# Get data in probe, which might be offset; find the
# beginning and unwrap.
result = ret[name].value
result = list(struct.unpack(len(result)*'b', result))
i = result.index(1)
result = result[i:] + result[0:i]
self.assertEqual(expected_result, result)
self.tb.stop()
if __name__ == '__main__':
gr_unittest.run(test_ctrlport_probes, "test_ctrlport_probes.xml")
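# A minimal standalone sketch of the rotation shared by every test above (the
# helper name is illustrative, not part of the suite): the probe hands back a
# circular buffer that may start mid-sequence, so each test locates the known
# first sample and rotates the list so that it leads.
def _unwrap(samples, first):
    i = samples.index(first)          # position of the known first sample
    return samples[i:] + samples[:i]  # rotate so it leads
assert _unwrap([5.0, 6.0, 7.0, 8.0, 1.0, 2.0, 3.0, 4.0], 1.0) == \
    [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]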
|
gpl-3.0
|
citassa1985/youtube-dl
|
youtube_dl/extractor/musicplayon.py
|
150
|
2670
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class MusicPlayOnIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?musicplayon\.com/play(?:-touch)?\?(?:v|pl=100&play)=(?P<id>\d+)'
_TEST = {
'url': 'http://en.musicplayon.com/play?v=433377',
'info_dict': {
'id': '433377',
'ext': 'mp4',
'title': 'Rick Ross - Interview On Chelsea Lately (2014)',
'description': 'Rick Ross Interview On Chelsea Lately',
'duration': 342,
'uploader': 'ultrafish',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(url, video_id)
title = self._og_search_title(page)
description = self._og_search_description(page)
thumbnail = self._og_search_thumbnail(page)
duration = self._html_search_meta('video:duration', page, 'duration', fatal=False)
view_count = self._og_search_property('count', page, fatal=False)
uploader = self._html_search_regex(
r'<div>by <a href="[^"]+" class="purple">([^<]+)</a></div>', page, 'uploader', fatal=False)
formats = [
{
'url': 'http://media0-eu-nl.musicplayon.com/stream-mobile?id=%s&type=.mp4' % video_id,
'ext': 'mp4',
}
]
manifest = self._download_webpage(
'http://en.musicplayon.com/manifest.m3u8?v=%s' % video_id, video_id, 'Downloading manifest')
for entry in manifest.split('#')[1:]:
if entry.startswith('EXT-X-STREAM-INF:'):
meta, url, _ = entry.split('\n')
params = dict(param.split('=') for param in meta.split(',')[1:])
formats.append({
'url': url,
'ext': 'mp4',
'tbr': int(params['BANDWIDTH']),
'width': int(params['RESOLUTION'].split('x')[0]),
'height': int(params['RESOLUTION'].split('x')[-1]),
'format_note': params['NAME'].replace('"', '').strip(),
})
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'duration': int_or_none(duration),
'view_count': int_or_none(view_count),
'formats': formats,
}
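# A standalone sketch of the EXT-X-STREAM-INF parsing performed above, run
# against a synthetic two-line manifest entry (all values below are invented
# for the example):
_entry = ('EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=568000,'
          'RESOLUTION=640x360,NAME="360"\n'
          'http://example.com/stream.m3u8\n')
_meta, _url, _ = _entry.split('\n')
_params = dict(param.split('=') for param in _meta.split(',')[1:])
assert _params['BANDWIDTH'] == '568000'
assert _params['RESOLUTION'].split('x') == ['640', '360']
assert _url == 'http://example.com/stream.m3u8'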
|
unlicense
|
nanuxbe/django-modeltranslation
|
modeltranslation/utils.py
|
7
|
5531
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import get_language as _get_language
from django.utils.translation import get_language_info
from django.utils.functional import lazy
from modeltranslation import settings
def get_language():
"""
Return an active language code that is guaranteed to be in
settings.LANGUAGES (Django does not seem to guarantee this for us).
"""
lang = _get_language()
if lang is None: # Django >= 1.8
return settings.DEFAULT_LANGUAGE
if lang not in settings.AVAILABLE_LANGUAGES and '-' in lang:
lang = lang.split('-')[0]
if lang in settings.AVAILABLE_LANGUAGES:
return lang
return settings.DEFAULT_LANGUAGE
def get_language_bidi(lang):
"""
Check if a language is bi-directional.
"""
lang_info = get_language_info(lang)
return lang_info['bidi']
def get_translation_fields(field):
"""
Returns a list of localized fieldnames for a given field.
"""
return [build_localized_fieldname(field, l) for l in settings.AVAILABLE_LANGUAGES]
def build_localized_fieldname(field_name, lang):
if lang == 'id':
# The 2-letter Indonesian language code is problematic with the
# current naming scheme as Django foreign keys also add "id" suffix.
lang = 'ind'
return str('%s_%s' % (field_name, lang.replace('-', '_')))
def _build_localized_verbose_name(verbose_name, lang):
if lang == 'id':
lang = 'ind'
return force_text('%s [%s]') % (force_text(verbose_name), lang)
build_localized_verbose_name = lazy(_build_localized_verbose_name, six.text_type)
def _join_css_class(bits, offset):
if '-'.join(bits[-offset:]) in settings.AVAILABLE_LANGUAGES + ['en-us']:
return '%s-%s' % ('_'.join(bits[:len(bits) - offset]), '_'.join(bits[-offset:]))
return ''
def build_css_class(localized_fieldname, prefix=''):
"""
Returns a css class based on ``localized_fieldname`` which is easily
splittable and capable of handling regionalized language codes.
Takes an optional ``prefix`` which is prepended to the returned string.
"""
bits = localized_fieldname.split('_')
css_class = ''
if len(bits) == 1:
css_class = str(localized_fieldname)
elif len(bits) == 2:
# Fieldname without underscore and short language code
# Examples:
# 'foo_de' --> 'foo-de',
# 'bar_en' --> 'bar-en'
css_class = '-'.join(bits)
elif len(bits) > 2:
# Try regionalized language code
# Examples:
# 'foo_es_ar' --> 'foo-es_ar',
# 'foo_bar_zh_tw' --> 'foo_bar-zh_tw'
css_class = _join_css_class(bits, 2)
if not css_class:
# Try short language code
# Examples:
# 'foo_bar_de' --> 'foo_bar-de',
# 'foo_bar_baz_de' --> 'foo_bar_baz-de'
css_class = _join_css_class(bits, 1)
return '%s-%s' % (prefix, css_class) if prefix else css_class
def unique(seq):
"""
>>> list(unique([1, 2, 3, 2, 2, 4, 1]))
[1, 2, 3, 4]
"""
seen = set()
return (x for x in seq if x not in seen and not seen.add(x))
def resolution_order(lang, override=None):
"""
Return order of languages which should be checked for parameter language.
First is always the parameter language, later are fallback languages.
Override parameter has priority over FALLBACK_LANGUAGES.
"""
if not settings.ENABLE_FALLBACKS:
return (lang,)
if override is None:
override = {}
fallback_for_lang = override.get(lang, settings.FALLBACK_LANGUAGES.get(lang, ()))
fallback_def = override.get('default', settings.FALLBACK_LANGUAGES['default'])
order = (lang,) + fallback_for_lang + fallback_def
return tuple(unique(order))
@contextmanager
def auto_populate(mode='all'):
"""
Overrides translation fields population mode (population mode decides which
unprovided translations will be filled during model construction / loading).
Example:
with auto_populate('all'):
s = Slugged.objects.create(title='foo')
s.title_en == 'foo'  # True
s.title_de == 'foo'  # True
This context manager may be used to ensure consistency when loading
untranslated fixtures while a non-default language is active:
with auto_populate('required'):
call_command('loaddata', 'fixture.json')
"""
current_population_mode = settings.AUTO_POPULATE
settings.AUTO_POPULATE = mode
try:
yield
finally:
settings.AUTO_POPULATE = current_population_mode
@contextmanager
def fallbacks(enable=True):
"""
Temporarily switch all language fallbacks on or off.
Example:
with fallbacks(False):
lang_has_slug = bool(self.slug)
May be used to enable fallbacks only when they are needed (saving some
processing), or to check whether a value exists for the currently active
language specifically, without knowing which language that is.
"""
current_enable_fallbacks = settings.ENABLE_FALLBACKS
settings.ENABLE_FALLBACKS = enable
try:
yield
finally:
settings.ENABLE_FALLBACKS = current_enable_fallbacks
def parse_field(setting, field_name, default):
"""
Extract result from single-value or dict-type setting like fallback_values.
"""
if isinstance(setting, dict):
return setting.get(field_name, default)
else:
return setting
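# A toy walk-through of resolution_order() with the settings lookups inlined
# (the fallback map below is invented for the example); duplicates in the
# concatenated chain are dropped by unique() while order is preserved:
_FALLBACKS = {'default': ('en',), 'fr': ('it', 'en')}
_lang = 'fr'
_order = (_lang,) + _FALLBACKS.get(_lang, ()) + _FALLBACKS['default']
assert tuple(unique(_order)) == ('fr', 'it', 'en')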
|
bsd-3-clause
|
aristanetworks/arista-ovs-nova
|
nova/tests/xenapi/test_vm_utils.py
|
1
|
6730
|
import mox
from nova import context
from nova import db
from nova import exception
from nova.tests.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
import unittest
class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
def setUp(self):
super(GetInstanceForVdisForSrTestCase, self).setUp()
self.flags(disable_process_locking=True,
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',)
def test_get_instance_vdis_for_sr(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
vdi_1 = fake.create_vdi('vdiname1', sr_ref)
vdi_2 = fake.create_vdi('vdiname2', sr_ref)
for vdi_ref in [vdi_1, vdi_2]:
fake.create_vbd(vm_ref, vdi_ref)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEquals([vdi_1, vdi_2], result)
def test_get_instance_vdis_for_sr_no_vbd(self):
vm_ref = fake.create_vm("foo", "Running")
sr_ref = fake.create_sr()
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = list(vm_utils.get_instance_vdis_for_sr(
driver._session, vm_ref, sr_ref))
self.assertEquals([], result)
def test_get_vdis_for_boot_from_vol(self):
dev_params = {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
'vdi_uuid': 'falseVDI'}
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
dev_params)
self.assertEquals(result['root']['uuid'], 'falseVDI')
def test_get_vdis_for_boot_from_vol_failure(self):
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
def bad_introduce_sr(session, sr_uuid, label, sr_params):
return None
self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr)
dev_params = {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
'server': 'myserver',
'serverpath': '/local/scratch/myname',
'sr_type': 'nfs',
'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
'vdi_uuid': 'falseVDI'}
self.assertRaises(exception.NovaException,
vm_utils.get_vdis_for_boot_from_vol,
driver._session, dev_params)
class VMRefOrRaiseVMFoundTestCase(unittest.TestCase):
def test_lookup_call(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn('ignored')
mock.ReplayAll()
vm_utils.vm_ref_or_raise('session', 'somename')
mock.VerifyAll()
def test_return_value(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
mock.ReplayAll()
self.assertEquals(
'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
mock.VerifyAll()
class VMRefOrRaiseVMNotFoundTestCase(unittest.TestCase):
def test_exception_raised(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
self.assertRaises(
exception.InstanceNotFound,
lambda: vm_utils.vm_ref_or_raise('session', 'somename')
)
mock.VerifyAll()
def test_exception_msg_contains_vm_name(self):
mock = mox.Mox()
mock.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup('session', 'somename').AndReturn(None)
mock.ReplayAll()
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
self.assertTrue(
'somename' in str(e))
mock.VerifyAll()
class BittorrentTestCase(stubs.XenAPITestBase):
def setUp(self):
super(BittorrentTestCase, self).setUp()
self.context = context.get_admin_context()
def test_image_uses_bittorrent(self):
sys_meta = {'image_bittorrent': True}
instance = db.instance_create(self.context,
{'system_metadata': sys_meta})
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.flags(xenapi_torrent_images='some')
self.assertTrue(vm_utils._image_uses_bittorrent(self.context,
instance))
def _test_create_image(self, cache_type):
sys_meta = {'image_cache_in_nova': True}
instance = db.instance_create(self.context,
{'system_metadata': sys_meta})
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.flags(cache_images=cache_type)
was = {'called': None}
def fake_create_cached_image(*args):
was['called'] = 'some'
return {}
self.stubs.Set(vm_utils, '_create_cached_image',
fake_create_cached_image)
def fake_fetch_image(*args):
was['called'] = 'none'
return {}
self.stubs.Set(vm_utils, '_fetch_image',
fake_fetch_image)
vm_utils._create_image(self.context, None, instance,
'foo', 'bar', 'baz')
self.assertEqual(was['called'], cache_type)
def test_create_image_cached(self):
self._test_create_image('some')
def test_create_image_uncached(self):
self._test_create_image('none')
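# The caching tests above record which stub ran via a closure over a mutable
# dict (rebinding a plain local from inside the nested function would not be
# visible outside it on Python 2); a minimal standalone version of the idea:
_was = {'called': None}
def _fake_stub(*args):
    _was['called'] = 'some'  # mutate the shared dict instead of rebinding
    return {}
_fake_stub()
assert _was['called'] == 'some'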
|
apache-2.0
|
pmeier82/SpikePlot
|
spikeplot/plot_xvf_tensor.py
|
1
|
3432
|
# -*- coding: utf-8 -*-
#
# spikeplot - plot_xvf_tensor.py
#
# Philipp Meier <pmeier82 at googlemail dot com>
# 2011-09-29
#
"""plot the xi vs f tensor in a grid"""
__docformat__ = 'restructuredtext'
__all__ = ['xvf_tensor']
##---IMPORTS
from .common import save_figure, check_plotting_handle, plt
##---FUNCTION
def xvf_tensor(data, nc=4, data_trans=None, plot_handle=None,
title='Xi vs F Tensor', filename=None, show=True):
"""plots xcorrs tensor for a templates-filter set
:Parameters:
# xvf_tensor parameters
data : list
List holding [templates, filters, xvft]. Templates and filters
are in the channel-concatenated representation. xvft has
dimensions [time, filters, templates].
nc : int
Channel count for templates, and filters.
data_trans : func
If not None, it has to be a data transformation function or lambda
that can be applied to the xvf tensor data.
# plot parameters
plot_handle : figure or axis
A reference to a figure or axis, or None if one has to be created.
title : str
A title for the plot. No title if None or ''.
filename : str
If given and a valid path on the local system, save the figure.
show : bool
If True, show the figure.
:Returns:
matplotlib.figure
Reference to the figure plotted on
matplotlib.axis
Reference to the axis plotted on
"""
# checks
fig = check_plotting_handle(plot_handle, create_ax=False)[0]
fig.clear()
if not isinstance(data, list):
raise TypeError('data expected to be a list of ndarrays: '
'[templates, filters,xvf-tensor data]')
if len(data) != 3:
raise ValueError('data expected to be a list of ndarrays: '
'[templates, filters,xvf-tensor data]')
temps, filts, xvft = data
if temps.shape != filts.shape:
raise ValueError('inconsistent shapes for templates and filters')
nitem = temps.shape[0]
# apply data transformation
if data_trans is not None:
xvft = data_trans(xvft)
# produce plot
n1 = nitem + 1
fmin, fmax = filts.min() * 1.1, filts.max() * 1.1
xmin, xmax = temps.min() * 1.1, temps.max() * 1.1
xvftmin, xvftmax = xvft.min() * 1.1, xvft.max() * 1.1
for j in xrange(nitem):
# j-th filter
ax_fj = fig.add_subplot(n1, n1, n1 * (j + 1) + 1)
ax_fj.plot(filts[j])
ax_fj.set_ylim(fmin, fmax)
ax_fj.set_xlim((0, temps[0].size))
# j-th xi
ax_uj = fig.add_subplot(n1, n1, j + 2)
ax_uj.plot(temps[j])
ax_uj.set_ylim(xmin, xmax)
ax_uj.set_xlim((0, temps[0].size))
# xcorrs
for i in xrange(nitem):
# the filter output of the j-th filter with the i-th unit
ax_xcij = fig.add_subplot(n1, n1, n1 * (j + 1) + i + 2)
ax_xcij.plot(xvft[i, j, :])
ax_xcij.set_ylim(xvftmin, xvftmax)
ax_xcij.set_xlim((0, xvft[i, j, :].size))
# fancy stuff
if title is not None:
fig.suptitle(title)
# produce plot
if filename is not None:
save_figure(fig, filename, '')
if show is True:
plt.show()
# return
return fig
##---MAIN
if __name__ == '__main__':
pass
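# A quick check of the (nitem + 1) x (nitem + 1) subplot indexing used above:
# row 0 holds the templates, column 0 the filters, and the inner cells the
# cross-correlations (indices below are for nitem = 2, i.e. a 3 x 3 grid):
_nitem = 2
_n1 = _nitem + 1
assert [_n1 * (j + 1) + 1 for j in range(_nitem)] == [4, 7]  # filters, col 0
assert [j + 2 for j in range(_nitem)] == [2, 3]              # templates, row 0
assert [_n1 * 1 + i + 2 for i in range(_nitem)] == [5, 6]    # xcorrs for j = 0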
|
mit
|
dursk/django
|
django/core/serializers/xml_serializer.py
|
184
|
15662
|
"""
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent') is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version": "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
attrs = OrderedDict([("model", smart_text(model._meta))])
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
obj_pk = obj._get_pk_val()
if obj_pk is not None:
attrs['pk'] = smart_text(obj_pk)
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("type", field.get_internal_type()),
]))
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
try:
self.xml.characters(field.value_to_string(obj))
except UnserializableContentError:
raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
obj.__class__.__name__, field.name, obj._get_pk_val()))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(smart_text(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.remote_field.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk': smart_text(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("rel", field.remote_field.__class__.__name__),
("to", smart_text(field.remote_field.model._meta)),
]))
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
def __init__(self, stream_or_string, **options):
super(Deserializer, self).__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream, self._make_parser())
self.db = options.pop('using', DEFAULT_DB_ALIAS)
self.ignore = options.pop('ignorenonexistent', False)
def _make_parser(self):
"""Create a hardened XML parser (no custom/external entities)."""
return DefusedExpatParser()
def __next__(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""
Convert an <object> node to a DeserializedObject.
"""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
data = {}
if node.hasAttribute('pk'):
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
node.getAttribute('pk'))
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly unless ignorenonexistent=True is used.
if self.ignore and field_name not in field_names:
continue
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
obj = base.build_instance(Model, data, self.db)
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(obj, m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
model = field.remote_field.model
if hasattr(model._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.remote_field.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.remote_field.model._meta.pk.remote_field:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return model._meta.get_field(field.remote_field.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
model = field.remote_field.model
default_manager = model._default_manager
if hasattr(default_manager, 'get_by_natural_key'):
def m2m_convert(n):
keys = n.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
else:
# Otherwise, treat like a normal PK value.
obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
return obj_pk
else:
m2m_convert = lambda n: model._meta.pk.to_python(n.getAttribute('pk'))
return [m2m_convert(c) for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Helper to look up a model from a <object model=...> or a <field
rel=... to=...> node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute"
% (node.nodeName, attr))
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'"
% (node.nodeName, model_identifier))
def getInnerText(node):
"""
Get all the inner text of a DOM node (recursively).
"""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
"""
An expat parser hardened against XML bomb attacks.
Forbids DTDs, external entity references
"""
def __init__(self, *args, **kwargs):
_ExpatParser.__init__(self, *args, **kwargs)
self.setFeature(handler.feature_external_ges, False)
self.setFeature(handler.feature_external_pes, False)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def reset(self):
_ExpatParser.reset(self)
parser = self._parser
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EntityDeclHandler = self.entity_decl
parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
"""Base exception."""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden."""
def __init__(self, name, sysid, pubid):
super(DTDForbidden, self).__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden."""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super(EntitiesForbidden, self).__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden."""
def __init__(self, context, base, sysid, pubid):
super(ExternalReferenceForbidden, self).__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
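# A hedged sketch of the hardened parser at work, using only names defined in
# this module: a document carrying a DTD is rejected before any entity can
# expand. Guarded so it only runs as a script, not on import.
if __name__ == '__main__':
    _evil = ('<?xml version="1.0"?>'
             '<!DOCTYPE bomb [<!ENTITY a "boom">]>'
             '<root>&a;</root>')
    try:
        for _event, _node in pulldom.parseString(_evil, DefusedExpatParser()):
            pass
    except DTDForbidden as exc:
        print(exc)  # DTDForbidden(name='bomb', system_id=None, public_id=None)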
|
bsd-3-clause
|
songmonit/CTTMSONLINE_V8
|
addons/crm_profiling/__init__.py
|
438
|
1089
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_profiling
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
thresholdsoftware/asylum-v2.0
|
openerp/addons/point_of_sale/wizard/pos_open_statement.py
|
48
|
4238
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class pos_open_statement(osv.osv_memory):
_name = 'pos.open.statement'
_description = 'Open Statements'
def open_statement(self, cr, uid, ids, context=None):
"""
Open the statements
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: Action window dictionary listing the opened cash registers
"""
data = {}
mod_obj = self.pool.get('ir.model.data')
statement_obj = self.pool.get('account.bank.statement')
sequence_obj = self.pool.get('ir.sequence')
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
st_ids = []
j_ids = journal_obj.search(cr, uid, [('journal_user','=',1)], context=context)
if not j_ids:
raise osv.except_osv(_('No Cash Register Defined!'), _('You have to define which payment method must be available in the point of sale by reusing existing bank and cash through "Accounting / Configuration / Journals / Journals". Select a journal and check the field "PoS Payment Method" from the "Point of Sale" tab. You can also create new payment methods directly from menu "PoS Backend / Configuration / Payment Methods".'))
for journal in journal_obj.browse(cr, uid, j_ids, context=context):
ids = statement_obj.search(cr, uid, [('state', '!=', 'confirm'), ('user_id', '=', uid), ('journal_id', '=', journal.id)], context=context)
if journal.sequence_id:
number = sequence_obj.next_by_id(cr, uid, journal.sequence_id.id, context=context)
else:
number = sequence_obj.next_by_code(cr, uid, 'account.cash.statement', context=context)
data.update({
'journal_id': journal.id,
'user_id': uid,
'state': 'draft',
'name': number
})
statement_id = statement_obj.create(cr, uid, data, context=context)
st_ids.append(int(statement_id))
if journal.cash_control:
statement_obj.button_open(cr, uid, [statement_id], context)
tree_res = mod_obj.get_object_reference(cr, uid, 'point_of_sale', 'view_cash_statement_pos_tree')
tree_id = tree_res and tree_res[1] or False
form_res = mod_obj.get_object_reference(cr, uid, 'account', 'view_bank_statement_form2')
form_id = form_res and form_res[1] or False
search_res = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_bank_statement_filter')
search_id = search_res and search_res[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('List of Cash Registers'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.bank.statement',
'domain': str([('id', 'in', st_ids)]),
'views': [(tree_id, 'tree'), (form_id, 'form')],
'search_view_id': search_id,
}
pos_open_statement()
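# The "res and res[1] or False" lookups above are the pre-ternary guarded
# access idiom; a standalone illustration (note it would misfire if res[1]
# were itself falsy, which database ids never are here):
_res = ('point_of_sale', 42)
assert (_res and _res[1] or False) == 42
_res = False
assert (_res and _res[1] or False) is False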
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
soarpenguin/ansible
|
test/runner/lib/config.py
|
43
|
7640
|
"""Configuration classes."""
from __future__ import absolute_import, print_function
import os
import sys
from lib.util import (
CommonConfig,
is_shippable,
docker_qualify_image,
)
from lib.metadata import (
Metadata,
)
class EnvironmentConfig(CommonConfig):
"""Configuration common to all commands which execute in an environment."""
def __init__(self, args, command):
"""
:type args: any
"""
super(EnvironmentConfig, self).__init__(args)
self.command = command
self.local = args.local is True
if args.tox is True or args.tox is False or args.tox is None:
self.tox = args.tox is True
self.tox_args = 0
self.python = args.python if 'python' in args else None # type: str
else:
self.tox = True
self.tox_args = 1
self.python = args.tox # type: str
self.docker = docker_qualify_image(args.docker) # type: str
self.remote = args.remote # type: str
self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool
self.docker_util = docker_qualify_image(args.docker_util if 'docker_util' in args else '') # type: str
self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool
self.tox_sitepackages = args.tox_sitepackages # type: bool
self.remote_stage = args.remote_stage # type: str
self.remote_aws_region = args.remote_aws_region # type: str
self.remote_terminate = args.remote_terminate # type: str
self.requirements = args.requirements # type: bool
if self.python == 'default':
self.python = '.'.join(str(i) for i in sys.version_info[:2])
self.python_version = self.python or '.'.join(str(i) for i in sys.version_info[:2])
self.delegate = self.tox or self.docker or self.remote
if self.delegate:
self.requirements = True
class TestConfig(EnvironmentConfig):
"""Configuration common to all test commands."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(TestConfig, self).__init__(args, command)
self.coverage = args.coverage # type: bool
self.coverage_label = args.coverage_label # type: str
self.include = args.include # type: list [str]
self.exclude = args.exclude # type: list [str]
self.require = args.require # type: list [str]
self.changed = args.changed # type: bool
self.tracked = args.tracked # type: bool
self.untracked = args.untracked # type: bool
self.committed = args.committed # type: bool
self.staged = args.staged # type: bool
self.unstaged = args.unstaged # type: bool
self.changed_from = args.changed_from # type: str
self.changed_path = args.changed_path # type: list [str]
self.lint = args.lint if 'lint' in args else False # type: bool
self.junit = args.junit if 'junit' in args else False # type: bool
self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool
self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
self.metadata_path = None
class ShellConfig(EnvironmentConfig):
"""Configuration for the shell command."""
def __init__(self, args):
"""
:type args: any
"""
super(ShellConfig, self).__init__(args, 'shell')
class SanityConfig(TestConfig):
"""Configuration for the sanity command."""
def __init__(self, args):
"""
:type args: any
"""
super(SanityConfig, self).__init__(args, 'sanity')
self.test = args.test # type: list [str]
self.skip_test = args.skip_test # type: list [str]
self.list_tests = args.list_tests # type: bool
if args.base_branch:
self.base_branch = args.base_branch # str
elif is_shippable():
self.base_branch = os.environ.get('BASE_BRANCH', '') # str
if self.base_branch:
self.base_branch = 'origin/%s' % self.base_branch
else:
self.base_branch = ''
class IntegrationConfig(TestConfig):
"""Configuration for the integration command."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(IntegrationConfig, self).__init__(args, command)
self.start_at = args.start_at # type: str
self.start_at_task = args.start_at_task # type: str
self.allow_destructive = args.allow_destructive if 'allow_destructive' in args else False # type: bool
self.retry_on_error = args.retry_on_error # type: bool
self.continue_on_error = args.continue_on_error # type: bool
self.debug_strategy = args.debug_strategy # type: bool
self.changed_all_target = args.changed_all_target # type: str
self.list_targets = args.list_targets # type: bool
self.tags = args.tags
self.skip_tags = args.skip_tags
self.diff = args.diff
if self.list_targets:
self.explain = True
class PosixIntegrationConfig(IntegrationConfig):
"""Configuration for the posix integration command."""
def __init__(self, args):
"""
:type args: any
"""
super(PosixIntegrationConfig, self).__init__(args, 'integration')
class WindowsIntegrationConfig(IntegrationConfig):
"""Configuration for the windows integration command."""
def __init__(self, args):
"""
:type args: any
"""
super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration')
self.windows = args.windows # type: list [str]
if self.windows:
self.allow_destructive = True
class NetworkIntegrationConfig(IntegrationConfig):
"""Configuration for the network integration command."""
def __init__(self, args):
"""
:type args: any
"""
super(NetworkIntegrationConfig, self).__init__(args, 'network-integration')
self.platform = args.platform # type: list [str]
self.inventory = args.inventory # type: str
class UnitsConfig(TestConfig):
"""Configuration for the units command."""
def __init__(self, args):
"""
:type args: any
"""
super(UnitsConfig, self).__init__(args, 'units')
self.collect_only = args.collect_only # type: bool
class CompileConfig(TestConfig):
"""Configuration for the compile command."""
def __init__(self, args):
"""
:type args: any
"""
super(CompileConfig, self).__init__(args, 'compile')
class CoverageConfig(EnvironmentConfig):
"""Configuration for the coverage command."""
def __init__(self, args):
"""
:type args: any
"""
super(CoverageConfig, self).__init__(args, 'coverage')
self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: frozenset [str]
self.all = args.all if 'all' in args else False # type: bool
self.stub = args.stub if 'stub' in args else False # type: bool
class CoverageReportConfig(CoverageConfig):
"""Configuration for the coverage report command."""
def __init__(self, args):
"""
:type args: any
"""
super(CoverageReportConfig, self).__init__(args)
self.show_missing = args.show_missing # type: bool
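# The recurring "args.x if 'x' in args else default" guard above relies on
# argparse.Namespace supporting membership tests against its attributes; a
# quick standalone check:
import argparse as _argparse
_ns = _argparse.Namespace(docker_pull=True)
assert 'docker_pull' in _ns and 'lint' not in _ns
assert (_ns.docker_pull if 'docker_pull' in _ns else False) is True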
|
gpl-3.0
|
photoninger/ansible
|
test/units/modules/network/aireos/aireos_module.py
|
73
|
2510
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
class TestCiscoWlcModule(ModuleTestCase):
def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
|
gpl-3.0
|
PaulWay/insights-core
|
insights/tools/perf.py
|
2
|
5493
|
#!/usr/bin/env python
import argparse
import json
import logging
import multiprocessing as mp
import os
import signal
import time
from collections import defaultdict
from random import sample
from insights.core import archives
from insights.core import load_package
from insights.core.evaluators import MultiEvaluator, SingleEvaluator
from insights.core.specs import SpecMapper
try:
from insights_nexus.config.factory import get_config
config = get_config()
except:
config = None
log = logging.getLogger(__name__)
stop = False
def stop_handler(signum, frame):
global stop
stop = True
signal.signal(signal.SIGINT, stop_handler)
signal.signal(signal.SIGTERM, stop_handler)
def get_args():
parser = argparse.ArgumentParser("python -m insights.tools.perf")
parser.add_argument("-p", "--package", required=True, dest="package", help="Package containing the rules to process.")
parser.add_argument("-n", "--num_archives", default=10, dest="num_archives", type=int, help="Number of archives to process.")
parser.add_argument("-w", "--workers", default=mp.cpu_count() / 2, dest="num_workers", type=int, help="Number of processes to use.")
parser.add_argument("-e", "--extract_dir", default="/tmp", dest="extract_dir", help="Working directory into which archives are extracted.")
parser.add_argument("-d", "--debug", default=False, action="store_true", help="Output DEBUG level messages and final stats.")
parser.add_argument("-s", "--silent", default=False, action="store_true", help="Output only FATAL messages and final stats.")
parser.add_argument("-r", "--random", default=False, action="store_true", help="Randomly select archives from all available.")
parser.add_argument("archive_path", nargs="*", help="Archive file or directory containing archives. Multiple files or directories may be specified.")
return parser.parse_args()
def print_stats(times, start, end, num_workers):
l = len(times)
median = sorted(times)[l / 2] if l else 0.0
avg = (sum(times) / float(l)) if l else 0.0
msg = """
Workers: %s
Max: %s
Min: %s
Avg: %s
Med: %s
Tot: %s
Throughput: %s
""" % (num_workers, max(times), min(times), avg, median, l, (float(l) / (end - start)))
print msg
def print_response(r):
skips = set()
for sk in r["skips"]:
ski = sk["details"]
something = ski[5:ski.index("]") + 1].replace("'", '"')
for ha in json.loads(something):
skips.add(ha)
r["skips"] = list(skips)
print json.dumps(r)
def get_paths(roots):
paths = []
for root in roots:
if os.path.isdir(root):
paths.extend([os.path.join(root, f) for f in os.listdir(root) if '.tar' in f])
elif '.tar' in root:
paths.append(root)
return paths
def process_report(path, tmp_dir):
with archives.TarExtractor() as extractor:
if config is None:
spec_mapper = SpecMapper(extractor.from_path(path, tmp_dir))
else:
spec_mapper = SpecMapper(extractor.from_path(path, tmp_dir), config)
md = json.loads(spec_mapper.get_content("metadata.json", split=False, default="{}"))
evaluator = MultiEvaluator(spec_mapper) if md and 'systems' in md else SingleEvaluator(spec_mapper)
return evaluator.process()
def worker(paths, extract_dir, results_queue):
for path in paths:
if stop:
results_queue.put(None)
return
result = None
start = time.time()
try:
result = process_report(path, extract_dir)
except Exception as ex:
result = ex
duration = time.time() - start
results_queue.put((duration, result))
def process_reports(paths, extract_dir, num_workers):
start = time.time()
times = []
results = []
results_queue = mp.Queue()
buckets = defaultdict(list)
for idx, path in enumerate(paths):
buckets[idx % num_workers].append(path)
pool = []
for i, p in buckets.iteritems():
args = (p, extract_dir, results_queue)
proc = mp.Process(target=worker, name="worker-%s" % i, args=args)
pool.append(proc)
for proc in pool:
proc.start()
def signal_handler(signum, frame):
print_stats(times, start, time.time(), num_workers)
signal.signal(signal.SIGUSR1, signal_handler)
stops = 0
for i in range(len(paths)):
t = results_queue.get()
if t is None:
stops += 1
if stops == num_workers:
break
else:
continue
d, r = t
times.append(d)
results.append(r)
print_stats(times, start, time.time(), num_workers)
for proc in pool:
proc.join()
def main():
args = get_args()
if args.silent:
logging.basicConfig(level=logging.FATAL)
else:
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
load_package(args.package)
extract_dir = args.extract_dir
num_archives = args.num_archives
paths = get_paths(args.archive_path)
if num_archives < len(paths):
if args.random:
paths = sample(paths, num_archives)
else:
paths = paths[:num_archives]
if len(paths) > 1:
process_reports(paths, extract_dir, args.num_workers)
else:
print_response(process_report(paths[0], extract_dir))
if __name__ == "__main__":
main()
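# The fan-out in process_reports() above is plain round-robin assignment; a
# standalone sketch of the bucketing for two workers:
_paths = ['a.tar', 'b.tar', 'c.tar', 'd.tar', 'e.tar']
_buckets = defaultdict(list)
for _idx, _path in enumerate(_paths):
    _buckets[_idx % 2].append(_path)
assert dict(_buckets) == {0: ['a.tar', 'c.tar', 'e.tar'], 1: ['b.tar', 'd.tar']}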
|
apache-2.0
|
johnkeepmoving/oss-ftp
|
python27/win32/Lib/site-packages/setuptools/tests/test_packageindex.py
|
377
|
7625
|
"""Package Index Tests
"""
import sys
import os
import unittest
import pkg_resources
from setuptools.compat import urllib2, httplib, HTTPError, unicode, pathname2url
import distutils.errors
import setuptools.package_index
from setuptools.tests.server import IndexServer
class TestPackageIndex(unittest.TestCase):
def test_bad_url_bad_port(self):
index = setuptools.package_index.PackageIndex()
url = 'http://127.0.0.1:0/nonesuch/test_package_index'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_typo(self):
# issue 16
# easy_install inquant.contentmirror.plone breaks because of a typo
# in its home URL
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_bad_status_line(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
def _urlopen(*args):
raise httplib.BadStatusLine('line')
index.opener = _urlopen
url = 'http://example.com'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue('line' in str(v))
else:
raise AssertionError('Should have raised here!')
def test_bad_url_double_scheme(self):
"""
A bad URL with a double scheme should raise a DistutilsError.
"""
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue 20
url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
try:
index.open_url(url)
except distutils.errors.DistutilsError:
error = sys.exc_info()[1]
msg = unicode(error)
assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
return
raise RuntimeError("Did not raise")
def test_bad_url_screwy_href(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue #160
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
# this should not fail
url = 'http://example.com'
page = ('<a href="http://www.famfamfam.com]('
'http://www.famfamfam.com/">')
index.process_index(url, page)
def test_url_ok(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'file:///tmp/test_package_index'
self.assertTrue(index.url_ok(url, True))
def test_links_priority(self):
"""
Download links from the pypi simple index should be used before
external download links.
https://bitbucket.org/tarek/distribute/issue/163
Use case:
- someone uploads a package on pypi, a md5 is generated
- someone manually copies this link (with the md5 in the url) onto an
external page accessible from the package page.
- someone reuploads the package (with a different md5)
- while easy_installing, an MD5 error occurs because the external link
is used
-> Setuptools should use the link from pypi, not the external one.
"""
if sys.platform.startswith('java'):
# Skip this test on jython because binding to :0 fails
return
# start an index server
server = IndexServer()
server.start()
index_url = server.base_url() + 'test_links_priority/simple/'
# scan a test index
pi = setuptools.package_index.PackageIndex(index_url)
requirement = pkg_resources.Requirement.parse('foobar')
pi.find_packages(requirement)
server.stop()
# the distribution has been found
self.assertTrue('foobar' in pi)
# we have only one link, because links are compared without md5
self.assertTrue(len(pi['foobar'])==1)
# the link should be from the index
self.assertTrue('correct_md5' in pi['foobar'][0].location)
def test_parse_bdist_wininst(self):
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))
def test__vcs_split_rev_from_url(self):
"""
Test the basic usage of _vcs_split_rev_from_url
"""
vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
url, rev = vsrfu('https://example.com/bar@2995')
self.assertEqual(url, 'https://example.com/bar')
self.assertEqual(rev, '2995')
def test_local_index(self):
"""
local_open should be able to read an index from the file system.
"""
f = open('index.html', 'w')
f.write('<div>content</div>')
f.close()
try:
url = 'file:' + pathname2url(os.getcwd()) + '/'
res = setuptools.package_index.local_open(url)
finally:
os.remove('index.html')
assert 'content' in res.read()
class TestContentCheckers(unittest.TestCase):
def test_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
checker.feed('You should probably not be using MD5'.encode('ascii'))
self.assertEqual(checker.hash.hexdigest(),
'f12895fdffbd45007040d2e44df98478')
self.assertTrue(checker.is_valid())
def test_other_fragment(self):
"Content checks should succeed silently if no hash is present"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#something%20completely%20different')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_blank_md5(self):
"Content checks should succeed if a hash is empty"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_get_hash_name_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
self.assertEqual(checker.hash_name, 'md5')
def test_report(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
rep = checker.report(lambda x: x, 'My message about %s')
self.assertEqual(rep, 'My message about md5')
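# The checkers above key everything off the URL fragment; a minimal sketch of
# extracting and verifying such a hash with the standard library alone (the
# digest is the one asserted by test_md5 above):
import hashlib
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2
_frag = urlparse('http://foo/bar#md5=f12895fdffbd45007040d2e44df98478').fragment
_name, _, _expected = _frag.partition('=')
_digest = hashlib.new(
    _name, 'You should probably not be using MD5'.encode('ascii')).hexdigest()
assert _name == 'md5' and _digest == _expected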
|
mit
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/scipy/sparse/linalg/eigen/arpack/setup.py
|
6
|
1388
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import get_g77_abi_wrappers
config = Configuration('arpack', parent_package, top_path)
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
arpack_sources = [join('ARPACK','SRC', '*.f')]
arpack_sources.extend([join('ARPACK','UTIL', '*.f')])
arpack_sources.extend([join('ARPACK','LAPACK', '*.f')])
arpack_sources += get_g77_abi_wrappers(lapack_opt)
config.add_library('arpack_scipy', sources=arpack_sources,
include_dirs=[join('ARPACK', 'SRC')])
config.add_extension('_arpack',
sources='arpack.pyf.src',
libraries=['arpack_scipy'],
extra_info=lapack_opt,
depends=arpack_sources,
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
agpl-3.0
|
mitake/linux
|
tools/perf/util/setup.py
|
4998
|
1330
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
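# The python-ext-sources filter above in isolation: keep non-empty lines whose
# first character is not '#'. A standalone sketch against an in-memory list:
_lines = ['util/python.c\n', '# a comment\n', '\n', 'util/ctype.c\n']
assert [f.strip() for f in _lines if len(f.strip()) > 0 and f[0] != '#'] == \
    ['util/python.c', 'util/ctype.c']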
|
gpl-2.0
|
konstruktoid/ansible-upstream
|
lib/ansible/modules/network/aruba/aruba_config.py
|
50
|
15163
|
#!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aruba_config
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Manage Aruba configuration sections
description:
- Aruba configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with Aruba configuration sections in
a deterministic way.
extends_documentation_fragment: aruba
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(running_config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
aliases: ['config']
save_when:
description:
- When changes are made to the device running-configuration, the
changes are not copied to non-volatile storage by default. Using
        this argument will change that behavior. If the argument is set to
I(always), then the running-config will always be copied to the
startup-config and the I(modified) flag will always be set to
True. If the argument is set to I(modified), then the running-config
will only be copied to the startup-config if it has changed since
the last save to startup-config. If the argument is set to
I(never), the running-config will never be copied to the
startup-config. If the argument is set to I(changed), then the running-config
will only be copied to the startup-config if the task has made a change.
default: never
choices: ['always', 'never', 'modified', 'changed']
version_added: "2.5"
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument
the module can generate diffs against different sources.
      - When this option is configured as I(startup), the module will return
the diff of the running-config against the startup-config.
- When this option is configured as I(intended), the module will
return the diff of the running-config against the configuration
provided in the C(intended_config) argument.
- When this option is configured as I(running), the module will
return the before and after diff of the running-config with respect
to any changes made to the device configuration.
choices: ['startup', 'intended', 'running']
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
intended_config:
description:
      - The C(intended_config) provides the master configuration that
        the node should conform to and is used to check the final
        running-config against. This argument will not modify any settings
        on the remote device and is used strictly to check the compliance
        of the current device's configuration. When specifying this
        argument, the task should also set the C(diff_against) value to
        I(intended).
encrypt:
description:
      - This allows an Aruba controller's passwords and keys to be displayed in plain
        text when set to I(false), or to remain encrypted when set to I(true).
        If set to I(false), encryption is re-enabled at the end of the module run.
        Backups are still encrypted even when set to I(false).
type: bool
default: 'yes'
version_added: "2.5"
"""
EXAMPLES = """
- name: configure top level configuration
aruba_config:
lines: hostname {{ inventory_hostname }}
- name: diff the running-config against a provided config
aruba_config:
diff_against: intended
intended: "{{ lookup('file', 'master.cfg') }}"
- name: configure interface settings
aruba_config:
lines:
- description test interface
- ip access-group 1 in
parents: interface gigabitethernet 0/0/0
- name: load new acl into device
aruba_config:
lines:
- permit host 10.10.10.10
- ipv6 permit host fda9:97d6:32a3:3e59::3333
parents: ip access-list standard 1
before: no ip access-list standard 1
match: exact
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/aruba_config.2016-07-16@22:28:34
"""
from ansible.module_utils.network.aruba.aruba import run_commands, get_config, load_config
from ansible.module_utils.network.aruba.aruba import aruba_argument_spec
from ansible.module_utils.network.aruba.aruba import check_args as aruba_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
def get_running_config(module, config=None):
contents = module.params['running_config']
if not contents:
if config:
contents = config
else:
contents = get_config(module)
return NetworkConfig(contents=contents)
def get_candidate(module):
candidate = NetworkConfig()
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def save_config(module, result):
result['changed'] = True
if not module.check_mode:
run_commands(module, 'copy running-config startup-config')
else:
module.warn('Skipping command `copy running-config startup-config` '
'due to check_mode. Configuration not copied to '
'non-volatile storage')
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
running_config=dict(aliases=['config']),
intended_config=dict(),
backup=dict(type='bool', default=False),
save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'),
diff_against=dict(choices=['running', 'startup', 'intended']),
diff_ignore_lines=dict(type='list'),
encrypt=dict(type='bool', default=True),
)
argument_spec.update(aruba_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
aruba_check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
config = None
if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
contents = get_config(module)
config = NetworkConfig(contents=contents)
if module.params['backup']:
result['__backup__'] = contents
if not module.params['encrypt']:
run_commands(module, 'encrypt disable')
if any((module.params['src'], module.params['lines'])):
match = module.params['match']
replace = module.params['replace']
candidate = get_candidate(module)
if match != 'none':
config = get_running_config(module, config)
path = module.params['parents']
configobjs = candidate.difference(config, match=match, replace=replace, path=path)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
running_config = None
startup_config = None
diff_ignore_lines = module.params['diff_ignore_lines']
if module.params['save_when'] == 'always':
save_config(module, result)
elif module.params['save_when'] == 'modified':
output = run_commands(module, ['show running-config', 'show startup-config'])
running_config = NetworkConfig(contents=output[0], ignore_lines=diff_ignore_lines)
startup_config = NetworkConfig(contents=output[1], ignore_lines=diff_ignore_lines)
if running_config.sha1 != startup_config.sha1:
save_config(module, result)
elif module.params['save_when'] == 'changed':
if result['changed']:
save_config(module, result)
if module._diff:
if not running_config:
output = run_commands(module, 'show running-config')
contents = output[0]
else:
contents = running_config.config_text
# recreate the object in order to process diff_ignore_lines
running_config = NetworkConfig(contents=contents, ignore_lines=diff_ignore_lines)
if module.params['diff_against'] == 'running':
if module.check_mode:
module.warn("unable to perform diff against running-config due to check mode")
contents = None
else:
contents = config.config_text
elif module.params['diff_against'] == 'startup':
if not startup_config:
output = run_commands(module, 'show startup-config')
contents = output[0]
else:
contents = startup_config.config_text
elif module.params['diff_against'] == 'intended':
contents = module.params['intended_config']
if contents is not None:
base_config = NetworkConfig(contents=contents, ignore_lines=diff_ignore_lines)
if running_config.sha1 != base_config.sha1:
result.update({
'changed': True,
'diff': {'before': str(base_config), 'after': str(running_config)}
})
# make sure 'encrypt enable' is applied if it was ever disabled
if not module.params['encrypt']:
run_commands(module, 'encrypt enable')
module.exit_json(**result)
if __name__ == '__main__':
main()
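# A minimal standalone sketch of the diff pipeline used in main() above,
# using only calls that already appear in this module (keyword defaults
# assumed for the omitted arguments):
#
#     from ansible.module_utils.network.common.config import NetworkConfig, dumps
#     running = NetworkConfig(contents='hostname old-name\n')
#     candidate = NetworkConfig()
#     candidate.add(['hostname new-name'])                # as in get_candidate()
#     changes = candidate.difference(running, match='line', replace='line')
#     print(dumps(changes, 'commands'))                   # -> hostname new-name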
|
gpl-3.0
|
jamesblunt/edx-platform
|
openedx/core/djangoapps/content/course_structures/tests.py
|
54
|
8934
|
import json
from xmodule_django.models import UsageKey
from xmodule.modulestore.django import SignalHandler
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.content.course_structures.signals import listen_for_course_publish
from openedx.core.djangoapps.content.course_structures.tasks import _generate_course_structure, update_course_structure
class SignalDisconnectTestMixin(object):
"""
Mixin for tests to disable calls to signals.listen_for_course_publish when the course_published signal is fired.
"""
def setUp(self):
super(SignalDisconnectTestMixin, self).setUp()
SignalHandler.course_published.disconnect(listen_for_course_publish)
class CourseStructureTaskTests(ModuleStoreTestCase):
def setUp(self, **kwargs):
super(CourseStructureTaskTests, self).setUp()
self.course = CourseFactory.create(org='TestX', course='TS101', run='T1')
self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
self.discussion_module_1 = ItemFactory.create(
parent=self.course,
category='discussion',
discussion_id='test_discussion_id_1'
)
self.discussion_module_2 = ItemFactory.create(
parent=self.course,
category='discussion',
discussion_id='test_discussion_id_2'
)
CourseStructure.objects.all().delete()
def test_generate_course_structure(self):
blocks = {}
def add_block(block):
children = block.get_children() if block.has_children else []
blocks[unicode(block.location)] = {
"usage_key": unicode(block.location),
"block_type": block.category,
"display_name": block.display_name,
"graded": block.graded,
"format": block.format,
"children": [unicode(child.location) for child in children]
}
for child in children:
add_block(child)
add_block(self.course)
expected = {
'root': unicode(self.course.location),
'blocks': blocks
}
self.maxDiff = None
actual = _generate_course_structure(self.course.id)
self.assertDictEqual(actual['structure'], expected)
def test_structure_json(self):
"""
Although stored as compressed data, CourseStructure.structure_json should always return the uncompressed string.
"""
course_id = 'a/b/c'
structure = {
'root': course_id,
'blocks': {
course_id: {
'id': course_id
}
}
}
structure_json = json.dumps(structure)
structure = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json)
self.assertEqual(structure.structure_json, structure_json)
# Reload the data to ensure the init signal is fired to decompress the data.
cs = CourseStructure.objects.get(course_id=self.course.id)
self.assertEqual(cs.structure_json, structure_json)
def test_structure(self):
"""
CourseStructure.structure should return the uncompressed, JSON-parsed course structure.
"""
structure = {
'root': 'a/b/c',
'blocks': {
'a/b/c': {
'id': 'a/b/c'
}
}
}
structure_json = json.dumps(structure)
cs = CourseStructure.objects.create(course_id=self.course.id, structure_json=structure_json)
self.assertDictEqual(cs.structure, structure)
def test_ordered_blocks(self):
structure = {
'root': 'a/b/c',
'blocks': {
'a/b/c': {
'id': 'a/b/c',
'children': [
'g/h/i'
]
},
'd/e/f': {
'id': 'd/e/f',
'children': []
},
'g/h/i': {
'id': 'h/j/k',
'children': [
'j/k/l',
'd/e/f'
]
},
'j/k/l': {
'id': 'j/k/l',
'children': []
}
}
}
in_order_blocks = ['a/b/c', 'g/h/i', 'j/k/l', 'd/e/f']
structure_json = json.dumps(structure)
retrieved_course_structure = CourseStructure.objects.create(
course_id=self.course.id, structure_json=structure_json
)
self.assertEqual(retrieved_course_structure.ordered_blocks.keys(), in_order_blocks)
def test_block_with_missing_fields(self):
"""
The generator should continue to operate on blocks/XModule that do not have graded or format fields.
"""
# TODO In the future, test logging using testfixtures.LogCapture
# (https://pythonhosted.org/testfixtures/logging.html). Talk to TestEng before adding that library.
category = 'peergrading'
display_name = 'Testing Module'
module = ItemFactory.create(parent=self.section, category=category, display_name=display_name)
structure = _generate_course_structure(self.course.id)
usage_key = unicode(module.location)
actual = structure['structure']['blocks'][usage_key]
expected = {
"usage_key": usage_key,
"block_type": category,
"display_name": display_name,
"graded": False,
"format": None,
"children": []
}
self.assertEqual(actual, expected)
def test_generate_discussion_id_map(self):
id_map = {}
def add_block(block):
"""Adds the given block and all of its children to the expected discussion id map"""
children = block.get_children() if block.has_children else []
if block.category == 'discussion':
id_map[block.discussion_id] = unicode(block.location)
for child in children:
add_block(child)
add_block(self.course)
actual = _generate_course_structure(self.course.id)
self.assertEqual(actual['discussion_id_map'], id_map)
def test_discussion_id_map_json(self):
id_map = {
'discussion_id_1': 'module_location_1',
'discussion_id_2': 'module_location_2'
}
id_map_json = json.dumps(id_map)
structure = CourseStructure.objects.create(course_id=self.course.id, discussion_id_map_json=id_map_json)
self.assertEqual(structure.discussion_id_map_json, id_map_json)
structure = CourseStructure.objects.get(course_id=self.course.id)
self.assertEqual(structure.discussion_id_map_json, id_map_json)
def test_discussion_id_map(self):
id_map = {
'discussion_id_1': 'block-v1:TestX+TS101+T1+type@discussion+block@b141953dff414921a715da37eb14ecdc',
'discussion_id_2': 'i4x://TestX/TS101/discussion/466f474fa4d045a8b7bde1b911e095ca'
}
id_map_json = json.dumps(id_map)
structure = CourseStructure.objects.create(course_id=self.course.id, discussion_id_map_json=id_map_json)
expected_id_map = {
key: UsageKey.from_string(value).map_into_course(self.course.id)
for key, value in id_map.iteritems()
}
self.assertEqual(structure.discussion_id_map, expected_id_map)
def test_discussion_id_map_missing(self):
structure = CourseStructure.objects.create(course_id=self.course.id)
self.assertIsNone(structure.discussion_id_map)
def test_update_course_structure(self):
"""
Test the actual task that orchestrates data generation and updating the database.
"""
# Method requires string input
course_id = self.course.id
self.assertRaises(ValueError, update_course_structure, course_id)
# Ensure a CourseStructure object is created
expected_structure = _generate_course_structure(course_id)
update_course_structure(unicode(course_id))
structure = CourseStructure.objects.get(course_id=course_id)
self.assertEqual(structure.course_id, course_id)
self.assertEqual(structure.structure, expected_structure['structure'])
self.assertEqual(structure.discussion_id_map.keys(), expected_structure['discussion_id_map'].keys())
self.assertEqual(
[unicode(value) for value in structure.discussion_id_map.values()],
expected_structure['discussion_id_map'].values()
)
|
agpl-3.0
|
nevercast/home-assistant
|
homeassistant/components/device_tracker/owntracks.py
|
3
|
3174
|
"""
homeassistant.components.device_tracker.owntracks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OwnTracks platform for the device tracker.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import homeassistant.components.mqtt as mqtt
from homeassistant.const import (STATE_HOME, STATE_NOT_HOME)
DEPENDENCIES = ['mqtt']
CONF_TRANSITION_EVENTS = 'use_events'
LOCATION_TOPIC = 'owntracks/+/+'
EVENT_TOPIC = 'owntracks/+/+/event'
def setup_scanner(hass, config, see):
""" Set up an OwnTracks tracker. """
def owntracks_location_update(topic, payload, qos):
""" MQTT message received. """
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
logging.getLogger(__name__).error(
'Unable to parse payload as JSON: %s', payload)
return
if not isinstance(data, dict) or data.get('_type') != 'location':
return
parts = topic.split('/')
kwargs = {
'dev_id': '{}_{}'.format(parts[1], parts[2]),
'host_name': parts[1],
'gps': (data['lat'], data['lon']),
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
see(**kwargs)
def owntracks_event_update(topic, payload, qos):
""" MQTT event (geofences) received. """
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typetransition
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
logging.getLogger(__name__).error(
'Unable to parse payload as JSON: %s', payload)
return
if not isinstance(data, dict) or data.get('_type') != 'transition':
return
# check if in "home" fence or other zone
location = ''
if data['event'] == 'enter':
if data['desc'].lower() == 'home':
location = STATE_HOME
else:
location = data['desc']
elif data['event'] == 'leave':
location = STATE_NOT_HOME
else:
logging.getLogger(__name__).error(
                'Malformed MQTT message, _type=transition, event=%s',
data['event'])
return
parts = topic.split('/')
kwargs = {
'dev_id': '{}_{}'.format(parts[1], parts[2]),
'host_name': parts[1],
'gps': (data['lat'], data['lon']),
'location_name': location,
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
see(**kwargs)
use_events = config.get(CONF_TRANSITION_EVENTS)
if use_events:
mqtt.subscribe(hass, EVENT_TOPIC, owntracks_event_update, 1)
else:
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
return True
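# A minimal sketch of the location handling above: feed a sample OwnTracks
# payload (field names taken from the parser above) through json.loads and
# build the same kwargs dict that see() receives. The topic and all values
# are made up for illustration.
if __name__ == '__main__':
    demo_topic = 'owntracks/alice/phone'
    demo_payload = ('{"_type": "location", "lat": 63.4, "lon": 10.4, '
                    '"acc": 20, "batt": 87}')
    demo_data = json.loads(demo_payload)
    demo_parts = demo_topic.split('/')
    demo_kwargs = {
        'dev_id': '{}_{}'.format(demo_parts[1], demo_parts[2]),
        'host_name': demo_parts[1],
        'gps': (demo_data['lat'], demo_data['lon']),
    }
    if 'acc' in demo_data:
        demo_kwargs['gps_accuracy'] = demo_data['acc']
    if 'batt' in demo_data:
        demo_kwargs['battery'] = demo_data['batt']
    print(demo_kwargs)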
|
mit
|
hastexo/edx-platform
|
lms/djangoapps/course_goals/views.py
|
2
|
4467
|
"""
Course Goals Views - includes REST API
"""
import analytics
from django.contrib.auth import get_user_model
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.http import JsonResponse
from edx_rest_framework_extensions.authentication import JwtAuthentication
from eventtracking import tracker
from opaque_keys.edx.keys import CourseKey
from openedx.core.lib.api.permissions import IsStaffOrOwner
from rest_framework import permissions, serializers, viewsets, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.response import Response
from .api import get_course_goal_options
from .models import CourseGoal, GOAL_KEY_CHOICES
User = get_user_model()
class CourseGoalSerializer(serializers.ModelSerializer):
"""
Serializes CourseGoal models.
"""
user = serializers.SlugRelatedField(slug_field='username', queryset=User.objects.all())
class Meta:
model = CourseGoal
fields = ('user', 'course_key', 'goal_key')
class CourseGoalViewSet(viewsets.ModelViewSet):
"""
API calls to create and update a course goal.
Validates incoming data to ensure that course_key maps to an actual
course and that the goal_key is a valid option.
**Use Case**
* Create a new goal for a user.
    * Update an existing goal for a user.
**Example Requests**
POST /api/course_goals/v0/course_goals/
Request data: {"course_key": <course-key>, "goal_key": "<goal-key>", "user": "<username>"}
Returns Http400 response if the course_key does not map to a known
course or if the goal_key does not map to a valid goal key.
"""
authentication_classes = (JwtAuthentication, SessionAuthentication,)
permission_classes = (permissions.IsAuthenticated, IsStaffOrOwner,)
queryset = CourseGoal.objects.all()
serializer_class = CourseGoalSerializer
def create(self, post_data):
""" Create a new goal if one does not exist, otherwise update the existing goal. """
# Ensure goal_key is valid
goal_options = get_course_goal_options()
goal_key = post_data.data['goal_key']
if goal_key not in goal_options:
return Response(
'Provided goal key, {goal_key}, is not a valid goal key (options= {goal_options}).'.format(
goal_key=goal_key,
goal_options=goal_options,
),
status=status.HTTP_400_BAD_REQUEST,
)
# Ensure course key is valid
course_key = CourseKey.from_string(post_data.data['course_key'])
if not course_key:
return Response(
'Provided course_key ({course_key}) does not map to a course.'.format(
course_key=course_key
),
status=status.HTTP_400_BAD_REQUEST,
)
user = post_data.user
goal = CourseGoal.objects.filter(user=user.id, course_key=course_key).first()
if goal:
goal.goal_key = goal_key
goal.save(update_fields=['goal_key'])
else:
CourseGoal.objects.create(
user=user,
course_key=course_key,
goal_key=goal_key,
)
data = {
'goal_key': str(goal_key),
'goal_text': str(goal_options[goal_key]),
'is_unsure': goal_key == GOAL_KEY_CHOICES.unsure,
}
return JsonResponse(data, content_type="application/json", status=(200 if goal else 201))
@receiver(post_save, sender=CourseGoal, dispatch_uid="emit_course_goals_event")
def emit_course_goal_event(sender, instance, **kwargs):
name = 'edx.course.goal.added' if kwargs.get('created', False) else 'edx.course.goal.updated'
tracker.emit(
name,
{
'goal_key': instance.goal_key,
}
)
if settings.LMS_SEGMENT_KEY:
update_google_analytics(name, instance.user.id)
def update_google_analytics(name, user_id):
""" Update student course goal for Google Analytics using Segment. """
tracking_context = tracker.get_tracker().resolve_context()
context = {
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(
user_id,
name,
context=context
)
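# A minimal client-side sketch of the request documented on CourseGoalViewSet
# above. The endpoint and field names come from the docstring; the host and
# values are hypothetical, and a real call would also need JWT or session
# authentication.
if __name__ == '__main__':
    import requests
    resp = requests.post(
        'https://lms.example.com/api/course_goals/v0/course_goals/',
        json={'course_key': 'course-v1:edX+DemoX+Demo_Course',
              'goal_key': 'unsure',
              'user': 'staff'},
    )
    print(resp.status_code)  # 201 when a goal is created, 200 when updated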
|
agpl-3.0
|
zhDai/CToFun
|
简单购物模式/pb_.py
|
1
|
8322
|
#_*_ coding:utf-8 _*_
import sys,os,getpass
#######################################################################
def start_screen():
print ('''
    *****************************************************************************
    If you have an account, choose "Log in (L)"; if none, please choose "Register (R)".
    So, L or R?
    *****************************************************************************
''')
comein = raw_input("Your choice:")
return comein
###################### Check raw_input string type and convert it ######################
def input_handle(s):
    if str.isdigit(s):  ### check whether the input is all digits ###
        s = int(s)      ### if so, convert it to an integer ###
    return s
#################### Page frame function ####################
def framework(name='', balance=''):
    os.system('cls')  ### clear the screen ###
    balance = int(balance)
    print('''
    ******************************************************************
    *                                                                *
    *              Welcome to the sky shopping platform              *
    *                                                                *
    ******************************************************************
    Member: %s        Current balance: %d
    ''' % (name, balance))
######################## Product list display function ########################
def shop_show(shop_dict):
    res_dict = {}
    ############ Walk the product list and add numeric indexes ############
    i = 1
    print('Product list:')
    print('================================================================')
    print('%-5s \t %-20s \t %-10s \t %-10s' % ('No.', 'Product', 'Price (CNY)', 'Stock'))
    for k in shop_dict:
        v = shop_dict[k]
        if type(v) is dict:
            print('%-5d \t %-20s \t %-10s \t %-10s' % (i, k, v['price'], v['num']))
            res_dict[i] = [k, v['price'], v['num']]
            i += 1
    print('================================================================')
    print('q : Exit')
    return res_dict
######################### Shopping cart function #########################
def shopping_cart(shop_cart):
    ############ Walk the cart and print each purchased item ############
    print('################### Welcome to your shopping cart ###################')
    print('%-20s \t %-10s \t %-10s \t %-10s' % ('Product', 'Price (CNY)', 'Quantity', 'Subtotal (CNY)'))
    for k in shop_cart:
        v = shop_cart[k]
        if type(v) is list:
            print('%-20s \t %-10d \t %-10d \t %-10d' % (k, v[0], v[1], v[2]))
    print('################### Please confirm your purchase ###################')
########################################################################
def main_():
    i = 0
    while i < 3:  # keep looping until the user fails to log in three times
        name = raw_input('Username: ')      # read the username
        passwd = raw_input('Password: ')    # read the password
        user_file = open('info.txt', 'r')   # open the accounts file
        user_list = user_file.readlines()
        for user_line in user_list:         # iterate over the accounts file
            (user, password, balance) = user_line.strip('\n').split()  # unpack username, password and current balance
            balance = int(balance)
            if name == user and passwd == password:  # the username and password match
                my_shop_cart = {}
                first_flag = 1
                while first_flag:
                    framework(user, balance)
                    new_dict = shop_show(shopping_dict)
                    shop_index = raw_input('Enter a product number | quit (q): ')
                    ############### if the input is non-empty, validate and convert its type ###########
                    if len(shop_index) != 0:
                        shop_index = input_handle(shop_index)
                        if shop_index == 'q':  ### quit the program on 'q' ###
                            sys.exit(0)
                        elif shop_index in new_dict:
                            (shop_name, shop_price, shop_num) = (new_dict[shop_index][0], new_dict[shop_index][1], new_dict[shop_index][2])
                            print('Product info [ name: %-15s \t price: %-5d (CNY) \t stock: %-5d ]' % (shop_name, shop_price, shop_num))
                            second_flag = 1
                            while second_flag:
                                shop_num = raw_input('Enter quantity to buy | back (b) | quit (q): ')
                                if len(shop_num) != 0:
                                    shop_num = input_handle(shop_num)
                                    if shop_num == 'q':  ### quit the program on 'q' ###
                                        sys.exit(0)
                                    elif shop_num == 'b':
                                        break
                                    elif shop_num > 0 and shop_num <= new_dict[shop_index][2]:
                                        shop_sum = shop_price * shop_num
                                        if shop_sum <= balance:
                                            print('Total price for %s: %d' % (shop_name, shop_sum))
                                            add_flag = raw_input('Add to the cart? (y | n): ')
                                            if add_flag == 'y':
                                                my_shop_cart[shop_name] = [shop_price, shop_num, shop_sum]
                                                balance -= shop_sum
                                                shopping_dict[shop_name]['num'] -= shop_num
                                                second_flag = 0
                                            else:
                                                break
                                        else:
                                            print('Insufficient balance; please top up or choose again.')
                                    else:
                                        pass
                            shopping_cart(my_shop_cart)
                        else:
                            pass
            else:
                if i != 2:
                    print('Wrong username or password; please try again, %d attempts left' % (2 - i))
                    i += 1
                else:
                    sys.exit('Three failed login attempts; exiting the system. Welcome back next time.')  # abort after three failures
        user_file.close()  # close the accounts file
################################ Main program ################################
shopping_dict = {
'iphone6': {'price':6000,'num':10},
'ipad': {'price':3000,'num':20},
'mi4': {'price':2000,'num':43},
'huawei6_plus': {'price':1999,'num':8},
}
start = start_screen()
if start == "L":
main_()
elif start == "R":
judge = 1
while judge:
Rname = raw_input("注册的用户名:")
Rpassword = raw_input("设置的密码:")
f = open('info.txt','r')
#with open('info.txt','wr') as f:
for line in f.readlines():
(user,password,balance)=line.strip().split()
if Rname == user:
judge = 1
                print ('''
*********************************************
That username is already taken; please enter another!
*********************************************
''')
break
else:
judge = 0
f.close()
if judge == 0:
f = open('info.txt','a')
f.write(Rname+" "+Rpassword+" "+"0\n")
f.close()
print "祝贺你注册成功,返回登录(R)|退出(Q)"
chosen = raw_input("您的选择:")
if chosen == "R":
main_()
elif chosen == "Q":
sys.exit(0)
else:
sys.exit(0)
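# Illustration of input_handle() above (not reached on the interactive paths,
# which all end in sys.exit or main_): digit strings are converted to int and
# everything else passes through unchanged, e.g. input_handle('42') -> 42,
# input_handle('q') -> 'q'.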
|
gpl-2.0
|
dsnopek/anki-sync-server
|
AnkiServer/__init__.py
|
1
|
1558
|
# AnkiServer - A personal Anki sync server
# Copyright (C) 2013 David Snopek
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os.path
# We put the system installed Anki first!
sys.path.insert(0, "/usr/share/anki")
# We'll put our bundled Anki after it
sys.path.insert(1, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'anki-bundled'))
__author__ = "David Snopek <[email protected]>"
__copyright__ = "Copyright (C) 2013 David Snopek"
__license__ = "GNU Affero General Public License v3 or later (AGPLv3+)"
__version__ = "2.0.6"
__all__ = []
def server_runner(app, global_conf, **kw):
""" Special version of paste.httpserver.server_runner which calls
AnkiServer.threading.shutdown() on server exit."""
from paste.httpserver import server_runner as paste_server_runner
from AnkiServer.threading import shutdown
try:
paste_server_runner(app, global_conf, **kw)
finally:
shutdown()
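# A minimal sketch of wiring server_runner() above to a trivial WSGI app; real
# deployments configure this through a paste.deploy .ini file instead. The
# host/port keywords below are illustrative and are passed straight through to
# paste.httpserver.
def _demo_app(environ, start_response):
    """Tiny WSGI app used only to illustrate server_runner()."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['AnkiServer is up\n']

# Blocking call, commented out so importing this package stays side-effect free:
# server_runner(_demo_app, {}, host='127.0.0.1', port='27701')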
|
agpl-3.0
|
gqwest-erp/server
|
openerp/addons/account_sequence/__init__.py
|
433
|
1104
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_sequence
import account_sequence_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
SAM-IT-SA/odoo
|
addons/im_livechat/im_livechat.py
|
239
|
12432
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import random
import openerp
import json
import openerp.addons.im_chat.im_chat
from openerp.osv import osv, fields
from openerp import tools
from openerp import http
from openerp.http import request
class im_livechat_channel(osv.Model):
_name = 'im_livechat.channel'
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('im_livechat', 'static/src/img', 'default.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _are_you_inside(self, cr, uid, ids, name, arg, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = False
for user in record.user_ids:
if user.id == uid:
res[record.id] = True
break
return res
def _script_external(self, cr, uid, ids, name, arg, context=None):
values = {
"url": self.pool.get('ir.config_parameter').get_param(cr, openerp.SUPERUSER_ID, 'web.base.url'),
"dbname":cr.dbname
}
res = {}
for record in self.browse(cr, uid, ids, context=context):
values["channel"] = record.id
res[record.id] = self.pool['ir.ui.view'].render(cr, uid, 'im_livechat.external_loader', values, context=context)
return res
def _script_internal(self, cr, uid, ids, name, arg, context=None):
values = {
"url": self.pool.get('ir.config_parameter').get_param(cr, openerp.SUPERUSER_ID, 'web.base.url'),
"dbname":cr.dbname
}
res = {}
for record in self.browse(cr, uid, ids, context=context):
values["channel"] = record.id
res[record.id] = self.pool['ir.ui.view'].render(cr, uid, 'im_livechat.internal_loader', values, context=context)
return res
def _web_page(self, cr, uid, ids, name, arg, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = self.pool.get('ir.config_parameter').get_param(cr, openerp.SUPERUSER_ID, 'web.base.url') + \
"/im_livechat/support/%s/%i" % (cr.dbname, record.id)
return res
_columns = {
'name': fields.char(string="Channel Name", size=200, required=True),
'user_ids': fields.many2many('res.users', 'im_livechat_channel_im_user', 'channel_id', 'user_id', string="Users"),
'are_you_inside': fields.function(_are_you_inside, type='boolean', string='Are you inside the matrix?', store=False),
'script_internal': fields.function(_script_internal, type='text', string='Script (internal)', store=False),
'script_external': fields.function(_script_external, type='text', string='Script (external)', store=False),
'web_page': fields.function(_web_page, type='char', string='Web Page', store=False),
'button_text': fields.char(string="Text of the Button"),
'input_placeholder': fields.char(string="Chat Input Placeholder"),
'default_message': fields.char(string="Welcome Message", help="This is an automated 'welcome' message that your visitor will see when they initiate a new chat session."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'im_livechat.channel': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'im_livechat.channel': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
def _default_user_ids(self, cr, uid, context=None):
return [(6, 0, [uid])]
_defaults = {
'button_text': "Have a Question? Chat with us.",
'input_placeholder': "How may I help you?",
'default_message': '',
'user_ids': _default_user_ids,
'image': _get_default_image,
}
def get_available_users(self, cr, uid, channel_id, context=None):
""" get available user of a given channel """
channel = self.browse(cr, uid, channel_id, context=context)
users = []
for user_id in channel.user_ids:
if (user_id.im_status == 'online'):
users.append(user_id)
return users
def get_channel_session(self, cr, uid, channel_id, anonymous_name, context=None):
""" return a session given a channel : create on with a registered user, or return false otherwise """
# get the avalable user of the channel
users = self.get_available_users(cr, uid, channel_id, context=context)
if len(users) == 0:
return False
user_id = random.choice(users).id
# create the session, and add the link with the given channel
Session = self.pool["im_chat.session"]
newid = Session.create(cr, uid, {'user_ids': [(4, user_id)], 'channel_id': channel_id, 'anonymous_name' : anonymous_name}, context=context)
return Session.session_info(cr, uid, [newid], context=context)
def test_channel(self, cr, uid, channel, context=None):
if not channel:
return {}
return {
'url': self.browse(cr, uid, channel[0], context=context or {}).web_page,
'type': 'ir.actions.act_url'
}
def get_info_for_chat_src(self, cr, uid, channel, context=None):
url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
chan = self.browse(cr, uid, channel, context=context)
return {
"url": url,
'buttonText': chan.button_text,
'inputPlaceholder': chan.input_placeholder,
'defaultMessage': chan.default_message,
"channelName": chan.name,
}
def join(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'user_ids': [(4, uid)]})
return True
def quit(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'user_ids': [(3, uid)]})
return True
class im_chat_session(osv.Model):
_inherit = 'im_chat.session'
def _get_fullname(self, cr, uid, ids, fields, arg, context=None):
""" built the complete name of the session """
result = {}
sessions = self.browse(cr, uid, ids, context=context)
for session in sessions:
names = []
for user in session.user_ids:
names.append(user.name)
if session.anonymous_name:
names.append(session.anonymous_name)
result[session.id] = ', '.join(names)
return result
_columns = {
'anonymous_name' : fields.char('Anonymous Name'),
'channel_id': fields.many2one("im_livechat.channel", "Channel"),
'fullname' : fields.function(_get_fullname, type="char", string="Complete name"),
}
def is_in_session(self, cr, uid, uuid, user_id, context=None):
""" return if the given user_id is in the session """
sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
for session in self.browse(cr, uid, sids, context=context):
if session.anonymous_name and user_id == openerp.SUPERUSER_ID:
return True
else:
return super(im_chat_session, self).is_in_session(cr, uid, uuid, user_id, context=context)
return False
def users_infos(self, cr, uid, ids, context=None):
""" add the anonymous user in the user of the session """
for session in self.browse(cr, uid, ids, context=context):
users_infos = super(im_chat_session, self).users_infos(cr, uid, ids, context=context)
if session.anonymous_name:
users_infos.append({'id' : False, 'name' : session.anonymous_name, 'im_status' : 'online'})
return users_infos
class LiveChatController(http.Controller):
@http.route('/im_livechat/support/<string:dbname>/<int:channel_id>', type='http', auth='none')
def support_page(self, dbname, channel_id, **kwargs):
registry, cr, uid, context = openerp.modules.registry.RegistryManager.get(dbname), request.cr, openerp.SUPERUSER_ID, request.context
info = registry.get('im_livechat.channel').get_info_for_chat_src(cr, uid, channel_id)
info["dbname"] = dbname
info["channel"] = channel_id
info["channel_name"] = registry.get('im_livechat.channel').read(cr, uid, channel_id, ['name'], context=context)["name"]
return request.render('im_livechat.support_page', info)
@http.route('/im_livechat/loader/<string:dbname>/<int:channel_id>', type='http', auth='none')
def loader(self, dbname, channel_id, **kwargs):
registry, cr, uid, context = openerp.modules.registry.RegistryManager.get(dbname), request.cr, openerp.SUPERUSER_ID, request.context
info = registry.get('im_livechat.channel').get_info_for_chat_src(cr, uid, channel_id)
info["dbname"] = dbname
info["channel"] = channel_id
info["username"] = kwargs.get("username", "Visitor")
return request.render('im_livechat.loader', info)
@http.route('/im_livechat/get_session', type="json", auth="none")
def get_session(self, channel_id, anonymous_name, **kwargs):
cr, uid, context, db = request.cr, request.uid or openerp.SUPERUSER_ID, request.context, request.db
reg = openerp.modules.registry.RegistryManager.get(db)
# if geoip, add the country name to the anonymous name
if hasattr(request, 'geoip'):
anonymous_name = anonymous_name + " ("+request.geoip.get('country_name', "")+")"
return reg.get("im_livechat.channel").get_channel_session(cr, uid, channel_id, anonymous_name, context=context)
@http.route('/im_livechat/available', type='json', auth="none")
def available(self, db, channel):
cr, uid, context, db = request.cr, request.uid or openerp.SUPERUSER_ID, request.context, request.db
reg = openerp.modules.registry.RegistryManager.get(db)
with reg.cursor() as cr:
return len(reg.get('im_livechat.channel').get_available_users(cr, uid, channel)) > 0
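# A minimal sketch of the URL scheme produced by _web_page() above; the base
# URL, database name and channel id are hypothetical. The support and loader
# controllers both follow the same /im_livechat/<route>/<dbname>/<channel_id>
# pattern.
if __name__ == '__main__':
    demo_base_url = 'http://localhost:8069'   # ir.config_parameter 'web.base.url'
    demo_dbname, demo_channel_id = 'mydb', 1
    print demo_base_url + "/im_livechat/support/%s/%i" % (demo_dbname, demo_channel_id)
    # -> http://localhost:8069/im_livechat/support/mydb/1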
|
agpl-3.0
|
mtr/ttpd
|
lib/TTP/PayExMessage.py
|
1
|
5449
|
#! /usr/bin/python
# -*- coding: latin-1 -*-
# $Id: Message.py 662 2007-02-06 13:59:26Z mtr $
"""
An implementation of the TUC Transfer Protocol.
This module contains a TTP message class and an XML parser that
transforms XML into a TTP message.
Copyright (C) 2004, 2007 by Lingit AS
Modified 25-may-2007, Kristian Skarbø, [email protected].
"""
__version__ = "$Rev: 662 $"
__author__ = "Martin Thorsen Ranang"
import cStringIO
import logging
import Queue
import re
import socket
import sys
import time
import xml.sax
from contextlib import contextmanager
from CoreMessage import (Message, MessageAck, MessageRequest,
MessageResult, XML2Message, build, send,
receive)
from CorePayExMessage import PayExMessage
__all__ = ['Message',
'MessageAck',
'MessageRequest',
'MessageResult',
'XML2Message']
# These variables are injected from the encapsulating environment:
options = None # Current configuration options.
payex_log = None # A 'logging' instance.
def setup_module(configuration_options):
global options
global payex_log
options = configuration_options
payex_log = logging.getLogger('ttpd.payex')
def _convert_reply(TransId, ORName, Billing):
"""Jukser til noe som ligner på et svar fra det gamle EAS-systemet.
Returnerer tuppelen (meta, body) akkurat som recieve().
Et typisk resultat fra _recv():
<?xml version="1.0"?>
<MxHead>
<TransId>LINGSMSOUT</TransId>
<ORName>4798233020</ORName>
<Pri>0</Pri><Ack>0</Ack><Stat>0</Stat><Ref>0</Ref>
<Aux>
<Billing>2</Billing>
<InitIf>IP</InitIf>
<InitProto>REMOTE</InitProto>
</Aux>
<Enc>0</Enc>
<Len>1</Len>
</MxHead>
C
"""
head = '<?xml version="1.0"?><MxHead>' \
'<TransId>%s</TransId>' \
'<ORName>%s</ORName>' \
'<Pri>0</Pri><Ack>0</Ack><Stat>0</Stat><Ref>0</Ref>' \
'<Aux><Billing>%s</Billing>' \
'<InitIf>IP</InitIf><InitProto>REMOTE</InitProto></Aux>' \
'<Enc>0</Enc><Len>1</Len></MxHead>' \
% (TransId, ORName, Billing)
body = "C"
try:
meta = build(head)
except xml.sax.SAXParseException, info:
what = '%s: %s' % (sys.exc_info()[0], info)
        # NOTE: there is no open connection in this scope, so the error is
        # reported through the return value below instead of being sent back.
return None, '%s, data = %s.' % (what, (head, body))
return (meta, body)
def communicate(message, remote_address, parser = None, timeout = False):
"""Communicate message and return with the reply.
    CHANGES (mainly by Kristian Skarbø)
    The working hypothesis here is that when message.MxHead.TransId =
    'LINGSMSOUT', an outgoing SMS is to be sent.
    What we tentatively do is pretend that everything works as before,
    while actually sneaking the SMS out through the new 'improved'
    PxSms interface. Hopefully the system will then keep ticking along
    until we can get the original author to do a proper
    restructuring.
"""
#try:
# TransId = message.MxHead.TransId
#except:
# TransId = ""
TransId = message.MxHead.TransId
payex_log.debug('TransId = "%s"', TransId)
if TransId == "LINGSMSOUT":
        # Try to send via PxSMS.
        destination = orig_ORName = message.MxHead.ORName
        # The old system delivers strings like '4798233020;1939;IPX',
        # where everything before the first semicolon is the actual
        # mobile number.
sc = destination.find(";")
if sc > -1:
destination = destination[:sc]
user_data = message._message
price = str(int(message.MxHead.Aux.Billing) * 50)
        # FIXME: Surely it must be possible to get some kind of ID from TTP?
order_id = "777"
if not options.payex_use_test_server:
account_number = options.payex_account_number
encryption_key = options.payex_encryption_key
remote_service_type = 'production'
else:
account_number = options.payex_test_account_number
encryption_key = options.payex_test_encryption_key
remote_service_type = 'test'
pem = PayExMessage(account_number, encryption_key,
options.originating_address, payex_log,
remote_service_type,
trace_file=options.payex_trace_file)
payex_log.debug('Initialized code for communicating with %s server.',
remote_service_type)
        # The actual rerouting:
(code, description, Billing, _destination) = \
pem.sendMessage(destination, user_data, price, order_id)
        # Convert the price back to the old format.
Billing = str(int(Billing) / 50)
        # Fake a response for the rest of the system. Sigh.
reply = _convert_reply(TransId, orig_ORName, Billing)
meta, body = reply
else:
        # Use the old way.
connection = connect(remote_address)
send(connection, message)
reply = receive(connection, parser, timeout)
connection.close()
meta, body = reply
return reply
def main():
"""Module mainline (for standalone execution).
"""
return
if __name__ == "__main__":
main()
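# A minimal sketch of the fake-reply helper above (values hypothetical):
#
#     meta, body = _convert_reply('LINGSMSOUT', '4798233020', '2')
#     body                  # -> 'C'
#     meta.MxHead.TransId   # -> 'LINGSMSOUT', parsed back from the header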
|
gpl-3.0
|
scs/uclinux
|
user/python/python-2.4.4/Lib/test/list_tests.py
|
12
|
15389
|
"""
Tests common to list and UserList.UserList
"""
import sys
import os
import unittest
from test import test_support, seq_tests
class CommonTest(seq_tests.CommonTest):
def test_init(self):
# Iterable arg is optional
self.assertEqual(self.type2test([]), self.type2test())
# Init clears previous values
a = self.type2test([1, 2, 3])
a.__init__()
self.assertEqual(a, self.type2test([]))
# Init overwrites previous values
a = self.type2test([1, 2, 3])
a.__init__([4, 5, 6])
self.assertEqual(a, self.type2test([4, 5, 6]))
# Mutables always return a new object
b = self.type2test(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_repr(self):
l0 = []
l2 = [0, 1, 2]
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), str(l0))
self.assertEqual(repr(a0), repr(l0))
self.assertEqual(`a2`, `l2`)
self.assertEqual(str(a2), "[0, 1, 2]")
self.assertEqual(repr(a2), "[0, 1, 2]")
a2.append(a2)
a2.append(3)
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
def test_print(self):
d = self.type2test(xrange(200))
d.append(d)
d.extend(xrange(200,400))
d.append(d)
d.append(400)
try:
fo = open(test_support.TESTFN, "wb")
print >> fo, d,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
os.remove(test_support.TESTFN)
def test_set_subscript(self):
a = self.type2test(range(20))
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
self.assertRaises(TypeError, a.__getitem__, 'x', 1)
a[slice(2,10,3)] = [1,2,3]
self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19]))
def test_reversed(self):
a = self.type2test(range(20))
r = reversed(a)
self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
self.assertRaises(StopIteration, r.next)
self.assertEqual(list(reversed(self.type2test())),
self.type2test())
def test_setitem(self):
a = self.type2test([0, 1])
a[0] = 0
a[1] = 100
self.assertEqual(a, self.type2test([0, 100]))
a[-1] = 200
self.assertEqual(a, self.type2test([0, 200]))
a[-2] = 100
self.assertEqual(a, self.type2test([100, 200]))
self.assertRaises(IndexError, a.__setitem__, -3, 200)
self.assertRaises(IndexError, a.__setitem__, 2, 200)
a = self.type2test([])
self.assertRaises(IndexError, a.__setitem__, 0, 200)
self.assertRaises(IndexError, a.__setitem__, -1, 200)
self.assertRaises(TypeError, a.__setitem__)
a = self.type2test([0,1,2,3,4])
a[0L] = 1
a[1L] = 2
a[2L] = 3
self.assertEqual(a, self.type2test([1,2,3,3,4]))
a[0] = 5
a[1] = 6
a[2] = 7
self.assertEqual(a, self.type2test([5,6,7,3,4]))
a[-2L] = 88
a[-1L] = 99
self.assertEqual(a, self.type2test([5,6,7,88,99]))
a[-2] = 8
a[-1] = 9
self.assertEqual(a, self.type2test([5,6,7,8,9]))
def test_delitem(self):
a = self.type2test([0, 1])
del a[1]
self.assertEqual(a, [0])
del a[0]
self.assertEqual(a, [])
a = self.type2test([0, 1])
del a[-2]
self.assertEqual(a, [1])
del a[-1]
self.assertEqual(a, [])
a = self.type2test([0, 1])
self.assertRaises(IndexError, a.__delitem__, -3)
self.assertRaises(IndexError, a.__delitem__, 2)
a = self.type2test([])
self.assertRaises(IndexError, a.__delitem__, 0)
self.assertRaises(TypeError, a.__delitem__)
def test_setslice(self):
l = [0, 1]
a = self.type2test(l)
for i in range(-3, 4):
a[:i] = l[:i]
self.assertEqual(a, l)
a2 = a[:]
a2[:i] = a[:i]
self.assertEqual(a2, a)
a[i:] = l[i:]
self.assertEqual(a, l)
a2 = a[:]
a2[i:] = a[i:]
self.assertEqual(a2, a)
for j in range(-3, 4):
a[i:j] = l[i:j]
self.assertEqual(a, l)
a2 = a[:]
a2[i:j] = a[i:j]
self.assertEqual(a2, a)
aa2 = a2[:]
aa2[:0] = [-2, -1]
self.assertEqual(aa2, [-2, -1, 0, 1])
aa2[0:] = []
self.assertEqual(aa2, [])
a = self.type2test([1, 2, 3, 4, 5])
a[:-1] = a
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
a = self.type2test([1, 2, 3, 4, 5])
a[1:-1] = a
self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
a = self.type2test([])
a[:] = tuple(range(10))
self.assertEqual(a, self.type2test(range(10)))
self.assertRaises(TypeError, a.__setslice__, 0, 1, 5)
self.assertRaises(TypeError, a.__setslice__)
def test_delslice(self):
a = self.type2test([0, 1])
del a[1:2]
del a[0:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1L:2L]
del a[0L:1L]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-2:-1]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[-2L:-1L]
self.assertEqual(a, self.type2test([1]))
a = self.type2test([0, 1])
del a[1:]
del a[:1]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[1L:]
del a[:1L]
self.assertEqual(a, self.type2test([]))
a = self.type2test([0, 1])
del a[-1:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[-1L:]
self.assertEqual(a, self.type2test([0]))
a = self.type2test([0, 1])
del a[:]
self.assertEqual(a, self.type2test([]))
def test_append(self):
a = self.type2test([])
a.append(0)
a.append(1)
a.append(2)
self.assertEqual(a, self.type2test([0, 1, 2]))
self.assertRaises(TypeError, a.append)
def test_extend(self):
a1 = self.type2test([0])
a2 = self.type2test((0, 1))
a = a1[:]
a.extend(a2)
self.assertEqual(a, a1 + a2)
a.extend(self.type2test([]))
self.assertEqual(a, a1 + a2)
a.extend(a)
self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
a = self.type2test("spam")
a.extend("eggs")
self.assertEqual(a, list("spameggs"))
self.assertRaises(TypeError, a.extend, None)
self.assertRaises(TypeError, a.extend)
def test_insert(self):
a = self.type2test([0, 1, 2])
a.insert(0, -2)
a.insert(1, -1)
a.insert(2, 0)
self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
b = a[:]
b.insert(-2, "foo")
b.insert(-200, "left")
b.insert(200, "right")
self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
self.assertRaises(TypeError, a.insert)
def test_pop(self):
from decimal import Decimal
a = self.type2test([-1, 0, 1])
a.pop()
self.assertEqual(a, [-1, 0])
a.pop(0)
self.assertEqual(a, [0])
self.assertRaises(IndexError, a.pop, 5)
a.pop(0)
self.assertEqual(a, [])
self.assertRaises(IndexError, a.pop)
self.assertRaises(TypeError, a.pop, 42, 42)
a = self.type2test([0, 10, 20, 30, 40])
self.assertEqual(a.pop(Decimal(2)), 20)
self.assertRaises(IndexError, a.pop, Decimal(25))
def test_remove(self):
a = self.type2test([0, 0, 1])
a.remove(1)
self.assertEqual(a, [0, 0])
a.remove(0)
self.assertEqual(a, [0])
a.remove(0)
self.assertEqual(a, [])
self.assertRaises(ValueError, a.remove, 0)
self.assertRaises(TypeError, a.remove)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.remove, BadCmp())
def test_count(self):
a = self.type2test([0, 1, 2])*3
self.assertEqual(a.count(0), 3)
self.assertEqual(a.count(1), 3)
self.assertEqual(a.count(3), 0)
self.assertRaises(TypeError, a.count)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
self.assertRaises(BadExc, a.count, BadCmp())
def test_index(self):
u = self.type2test([0, 1])
self.assertEqual(u.index(0), 0)
self.assertEqual(u.index(1), 1)
self.assertRaises(ValueError, u.index, 2)
u = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(u.count(0), 2)
self.assertEqual(u.index(0), 2)
self.assertEqual(u.index(0, 2), 2)
self.assertEqual(u.index(-2, -10), 0)
self.assertEqual(u.index(0, 3), 3)
self.assertEqual(u.index(0, 3, 4), 3)
self.assertRaises(ValueError, u.index, 2, 0, -10)
self.assertRaises(TypeError, u.index)
class BadExc(Exception):
pass
class BadCmp:
def __eq__(self, other):
if other == 2:
raise BadExc()
return False
a = self.type2test([0, 1, 2, 3])
self.assertRaises(BadExc, a.index, BadCmp())
a = self.type2test([-2, -1, 0, 0, 1, 2])
self.assertEqual(a.index(0), 2)
self.assertEqual(a.index(0, 2), 2)
self.assertEqual(a.index(0, -4), 2)
self.assertEqual(a.index(-2, -10), 0)
self.assertEqual(a.index(0, 3), 3)
self.assertEqual(a.index(0, -3), 3)
self.assertEqual(a.index(0, 3, 4), 3)
self.assertEqual(a.index(0, -3, -2), 3)
self.assertEqual(a.index(0, -4*sys.maxint, 4*sys.maxint), 2)
self.assertRaises(ValueError, a.index, 0, 4*sys.maxint,-4*sys.maxint)
self.assertRaises(ValueError, a.index, 2, 0, -10)
a.remove(0)
self.assertRaises(ValueError, a.index, 2, 0, 4)
self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
# Test modifying the list during index's iteration
class EvilCmp:
def __init__(self, victim):
self.victim = victim
def __eq__(self, other):
del self.victim[:]
return False
a = self.type2test()
a[:] = [EvilCmp(a) for _ in xrange(100)]
# This used to seg fault before patch #1005778
self.assertRaises(ValueError, a.index, None)
def test_reverse(self):
u = self.type2test([-2, -1, 0, 1, 2])
u2 = u[:]
u.reverse()
self.assertEqual(u, [2, 1, 0, -1, -2])
u.reverse()
self.assertEqual(u, u2)
self.assertRaises(TypeError, u.reverse, 42)
def test_sort(self):
u = self.type2test([1, 0])
u.sort()
self.assertEqual(u, [0, 1])
u = self.type2test([2,1,0,-1,-2])
u.sort()
self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
self.assertRaises(TypeError, u.sort, 42, 42)
def revcmp(a, b):
return cmp(b, a)
u.sort(revcmp)
self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
return cmp(x%3, y%7)
z = self.type2test(range(12))
z.sort(myComparison)
self.assertRaises(TypeError, z.sort, 2)
def selfmodifyingComparison(x,y):
z.append(1)
return cmp(x, y)
self.assertRaises(ValueError, z.sort, selfmodifyingComparison)
self.assertRaises(TypeError, z.sort, lambda x, y: 's')
self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
def test_slice(self):
u = self.type2test("spam")
u[:2] = "h"
self.assertEqual(u, list("ham"))
def test_iadd(self):
super(CommonTest, self).test_iadd()
u = self.type2test([0, 1])
u2 = u
u += [2, 3]
self.assert_(u is u2)
u = self.type2test("spam")
u += "eggs"
self.assertEqual(u, self.type2test("spameggs"))
self.assertRaises(TypeError, u.__iadd__, None)
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
u *= 0
self.assertEqual(u, self.type2test([]))
s = self.type2test([])
oldid = id(s)
s *= 10
self.assertEqual(id(s), oldid)
def test_extendedslicing(self):
# subscript
a = self.type2test([0,1,2,3,4])
# deletion
del a[::2]
self.assertEqual(a, self.type2test([1,3]))
a = self.type2test(range(5))
del a[1::2]
self.assertEqual(a, self.type2test([0,2,4]))
a = self.type2test(range(5))
del a[1::-2]
self.assertEqual(a, self.type2test([0,2,3,4]))
a = self.type2test(range(10))
del a[::1000]
self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
# assignment
a = self.type2test(range(10))
a[::2] = [-1]*5
self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
a = self.type2test(range(10))
a[::-4] = [10]*3
        self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8, 10]))
a = self.type2test(range(4))
a[::-1] = a
self.assertEqual(a, self.type2test([3, 2, 1, 0]))
a = self.type2test(range(10))
b = a[:]
c = a[:]
a[2:3] = self.type2test(["two", "elements"])
b[slice(2,3)] = self.type2test(["two", "elements"])
c[2:3:] = self.type2test(["two", "elements"])
self.assertEqual(a, b)
self.assertEqual(a, c)
a = self.type2test(range(10))
a[::2] = tuple(range(5))
self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
def test_constructor_exception_handling(self):
# Bug #1242657
class F(object):
def __iter__(self):
yield 23
def __len__(self):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, list, F())
|
gpl-2.0
|
ArcherSys/ArcherSys
|
eclipse/plugins/org.python.pydev.jython_4.5.5.201603221110/Lib/encodings/cp424.py
|
593
|
12311
|
""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp424',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> SELECT
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> REQUIRED NEW LINE
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> GRAPHIC ESCAPE
u'\x8d' # 0x09 -> SUPERSCRIPT
u'\x8e' # 0x0A -> REPEAT
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
u'\x85' # 0x15 -> NEW LINE
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> UNIT BACK SPACE
u'\x8f' # 0x1B -> CUSTOMER USE ONE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> DIGIT SELECT
u'\x81' # 0x21 -> START OF SIGNIFICANCE
u'\x82' # 0x22 -> FIELD SEPARATOR
u'\x83' # 0x23 -> WORD UNDERSCORE
u'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> SET ATTRIBUTE
u'\x89' # 0x29 -> START FIELD EXTENDED
u'\x8a' # 0x2A -> SET MODE OR SWITCH
u'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
u'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> <reserved>
u'\x91' # 0x31 -> <reserved>
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> INDEX RETURN
u'\x94' # 0x34 -> PRESENTATION POSITION
u'\x95' # 0x35 -> TRANSPARENT
u'\x96' # 0x36 -> NUMERIC BACKSPACE
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> SUBSCRIPT
u'\x99' # 0x39 -> INDENT TABULATION
u'\x9a' # 0x3A -> REVERSE FORM FEED
u'\x9b' # 0x3B -> CUSTOMER USE THREE
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> <reserved>
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u05d0' # 0x41 -> HEBREW LETTER ALEF
u'\u05d1' # 0x42 -> HEBREW LETTER BET
u'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x44 -> HEBREW LETTER DALET
u'\u05d4' # 0x45 -> HEBREW LETTER HE
u'\u05d5' # 0x46 -> HEBREW LETTER VAV
u'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x48 -> HEBREW LETTER HET
u'\u05d8' # 0x49 -> HEBREW LETTER TET
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\u05d9' # 0x51 -> HEBREW LETTER YOD
u'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x53 -> HEBREW LETTER KAF
u'\u05dc' # 0x54 -> HEBREW LETTER LAMED
u'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x56 -> HEBREW LETTER MEM
u'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x58 -> HEBREW LETTER NUN
u'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u05e2' # 0x62 -> HEBREW LETTER AYIN
u'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x64 -> HEBREW LETTER PE
u'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x66 -> HEBREW LETTER TSADI
u'\u05e7' # 0x67 -> HEBREW LETTER QOF
u'\u05e8' # 0x68 -> HEBREW LETTER RESH
u'\u05e9' # 0x69 -> HEBREW LETTER SHIN
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\ufffe' # 0x70 -> UNDEFINED
u'\u05ea' # 0x71 -> HEBREW LETTER TAV
u'\ufffe' # 0x72 -> UNDEFINED
u'\ufffe' # 0x73 -> UNDEFINED
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\ufffe' # 0x75 -> UNDEFINED
u'\ufffe' # 0x76 -> UNDEFINED
u'\ufffe' # 0x77 -> UNDEFINED
u'\u2017' # 0x78 -> DOUBLE LOW LINE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\ufffe' # 0x80 -> UNDEFINED
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\xb8' # 0x9D -> CEDILLA
u'\ufffe' # 0x9E -> UNDEFINED
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\ufffe' # 0xAA -> UNDEFINED
u'\ufffe' # 0xAB -> UNDEFINED
u'\ufffe' # 0xAC -> UNDEFINED
u'\ufffe' # 0xAD -> UNDEFINED
u'\ufffe' # 0xAE -> UNDEFINED
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\ufffe' # 0xCB -> UNDEFINED
u'\ufffe' # 0xCC -> UNDEFINED
u'\ufffe' # 0xCD -> UNDEFINED
u'\ufffe' # 0xCE -> UNDEFINED
u'\ufffe' # 0xCF -> UNDEFINED
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\ufffe' # 0xDF -> UNDEFINED
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\ufffe' # 0xEE -> UNDEFINED
u'\ufffe' # 0xEF -> UNDEFINED
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\ufffe' # 0xFB -> UNDEFINED
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\x9f' # 0xFF -> EIGHT ONES
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
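# A minimal round-trip sketch (illustrative, not part of the gencodec
# output): the two tables above can be exercised directly.
#
#     decoded, _ = codecs.charmap_decode('\x41', 'strict', decoding_table)
#     assert decoded == u'\u05d0'   # 0x41 -> HEBREW LETTER ALEF
#     encoded, _ = codecs.charmap_encode(decoded, 'strict', encoding_table)
#     assert encoded == '\x41'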
|
mit
|
Tatsh-ansible/ansible
|
test/units/executor/test_task_result.py
|
45
|
5605
|
# (c) 2016, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.executor.task_result import TaskResult
class TestTaskResult(unittest.TestCase):
def test_task_result_basic(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test loading a result with a dict
tr = TaskResult(mock_host, mock_task, dict())
# test loading a result with a JSON string
with patch('ansible.parsing.dataloader.DataLoader.load') as p:
tr = TaskResult(mock_host, mock_task, '{}')
def test_task_result_is_changed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no changed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_changed())
# test with changed in the result
tr = TaskResult(mock_host, mock_task, dict(changed=True))
self.assertTrue(tr.is_changed())
# test with multiple results but none changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_changed())
# test with multiple results and one changed
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
self.assertTrue(tr.is_changed())
def test_task_result_is_skipped(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no skipped in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_skipped())
# test with skipped in the result
tr = TaskResult(mock_host, mock_task, dict(skipped=True))
self.assertTrue(tr.is_skipped())
# test with multiple results but none skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_skipped())
# test with multiple results and one skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
self.assertFalse(tr.is_skipped())
# test with multiple results and all skipped
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
self.assertTrue(tr.is_skipped())
# test with multiple squashed results (list of strings)
# first with the main result having skipped=False
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
self.assertFalse(tr.is_skipped())
# then with the main result having skipped=True
tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
self.assertTrue(tr.is_skipped())
def test_task_result_is_unreachable(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no unreachable in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_unreachable())
# test with unreachable in the result
tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
self.assertTrue(tr.is_unreachable())
# test with multiple results but none unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
self.assertFalse(tr.is_unreachable())
# test with multiple results and one unreachable
mock_task.loop = 'foo'
tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
self.assertTrue(tr.is_unreachable())
def test_task_result_is_failed(self):
mock_host = MagicMock()
mock_task = MagicMock()
# test with no failed in result
tr = TaskResult(mock_host, mock_task, dict())
self.assertFalse(tr.is_failed())
# test failed result with rc values (should not matter)
tr = TaskResult(mock_host, mock_task, dict(rc=0))
self.assertFalse(tr.is_failed())
tr = TaskResult(mock_host, mock_task, dict(rc=1))
self.assertFalse(tr.is_failed())
# test with failed in result
tr = TaskResult(mock_host, mock_task, dict(failed=True))
self.assertTrue(tr.is_failed())
# test with failed_when in result
tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
self.assertTrue(tr.is_failed())
|
gpl-3.0
|
pepetreshere/odoo
|
addons/product_margin/tests/test_product_margin.py
|
2
|
2591
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
@tagged('post_install', '-at_install')
class TestProductMargin(AccountTestInvoicingCommon):
def test_product_margin(self):
''' In order to test the product_margin module '''
supplier = self.env['res.partner'].create({'name': 'Supplier'})
customer = self.env['res.partner'].create({'name': 'Customer'})
ipad = self.env['product.product'].create({
'name': 'Ipad',
'standard_price': 500.0,
'list_price': 750.0,
})
invoices = self.env['account.move'].create([
{
'move_type': 'in_invoice',
'partner_id': supplier.id,
'invoice_line_ids': [(0, 0, {'product_id': ipad.id, 'quantity': 10.0, 'price_unit': 300.0})],
},
{
'move_type': 'in_invoice',
'partner_id': supplier.id,
'invoice_line_ids': [(0, 0, {'product_id': ipad.id, 'quantity': 4.0, 'price_unit': 450.0})],
},
{
'move_type': 'out_invoice',
'partner_id': customer.id,
'invoice_line_ids': [(0, 0, {'product_id': ipad.id, 'quantity': 20.0, 'price_unit': 750.0})],
},
{
'move_type': 'out_invoice',
'partner_id': customer.id,
'invoice_line_ids': [(0, 0, {'product_id': ipad.id, 'quantity': 10.0, 'price_unit': 550.0})],
},
])
invoices.action_post()
result = ipad._compute_product_margin_fields_values()
        # Sale turnover (Quantity * Unit Price, summed over customer invoices)
sale_turnover = ((20.0 * 750.00) + (10.0 * 550.00))
# Expected sale (Total quantity * Sale price)
sale_expected = (750.00 * 30.0)
# Purchase total cost (Quantity * Unit price)
purchase_total_cost = ((10.0 * 300.00) + (4.0 * 450.00))
        # Purchase normal cost (Total quantity * Cost price)
purchase_normal_cost = (14.0 * 500.00)
total_margin = sale_turnover - purchase_total_cost
expected_margin = sale_expected - purchase_normal_cost
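        # With the invoices above, these work out to:
        #   sale_turnover        = 15000.0 + 5500.0 = 20500.0
        #   sale_expected        = 750.0 * 30.0     = 22500.0
        #   purchase_total_cost  = 3000.0 + 1800.0  = 4800.0
        #   purchase_normal_cost = 14.0 * 500.0     = 7000.0
        #   total_margin         = 20500.0 - 4800.0 = 15700.0
        #   expected_margin      = 22500.0 - 7000.0 = 15500.0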
# Check total margin
self.assertEqual(result[ipad.id]['total_margin'], total_margin, "Wrong Total Margin.")
# Check expected margin
self.assertEqual(result[ipad.id]['expected_margin'], expected_margin, "Wrong Expected Margin.")
|
agpl-3.0
|
eenchev/idea-note-taking-app
|
env/lib/python2.7/site-packages/psycopg2/tz.py
|
23
|
4424
|
"""tzinfo implementations for psycopg2
This module holds two different tzinfo implementations that can be used as
the 'tzinfo' argument to datetime constructors, directly passed to psycopg
functions or used to set the .tzinfo_factory attribute in cursors.
"""
# psycopg/tz.py - tzinfo implementation
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import datetime
import time
ZERO = datetime.timedelta(0)
class FixedOffsetTimezone(datetime.tzinfo):
"""Fixed offset in minutes east from UTC.
This is exactly the implementation__ found in Python 2.3.x documentation,
with a small change to the `!__init__()` method to allow for pickling
    and a default name in the form ``sHH:MM`` (``s`` is the sign).
The implementation also caches instances. During creation, if a
FixedOffsetTimezone instance has previously been created with the same
offset and name that instance will be returned. This saves memory and
improves comparability.
.. __: http://docs.python.org/library/datetime.html#datetime-tzinfo
"""
_name = None
_offset = ZERO
_cache = {}
def __init__(self, offset=None, name=None):
if offset is not None:
self._offset = datetime.timedelta(minutes=offset)
if name is not None:
self._name = name
def __new__(cls, offset=None, name=None):
"""Return a suitable instance created earlier if it exists
"""
key = (offset, name)
try:
return cls._cache[key]
except KeyError:
tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
cls._cache[key] = tz
return tz
def __repr__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
% (offset_mins, self._name)
def __getinitargs__(self):
offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
return (offset_mins, self._name)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
if self._name is not None:
return self._name
else:
seconds = self._offset.seconds + self._offset.days * 86400
hours, seconds = divmod(seconds, 3600)
            minutes = seconds // 60
            if minutes:
                return "%+03d:%02d" % (hours, minutes)
else:
return "%+03d" % hours
def dst(self, dt):
return ZERO
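# A minimal sketch of the caching behaviour documented above (illustrative):
# constructing the class twice with the same arguments returns one object.
#
#     tz1 = FixedOffsetTimezone(offset=60, name="CET")
#     tz2 = FixedOffsetTimezone(offset=60, name="CET")
#     assert tz1 is tz2
#     assert datetime.datetime(2010, 1, 1, tzinfo=tz1).utcoffset() \
#         == datetime.timedelta(minutes=60)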
STDOFFSET = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
DSTOFFSET = datetime.timedelta(seconds=-time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(datetime.tzinfo):
"""Platform idea of local timezone.
This is the exact implementation from the Python 2.3 documentation.
"""
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
LOCAL = LocalTimezone()
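# A minimal usage sketch (illustrative): LOCAL plugs straight into datetime
# constructors and now().
#
#     now = datetime.datetime.now(LOCAL)
#     now.tzname()   # e.g. 'CET' or 'CEST', depending on _isdst(now)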
# TODO: pre-generate some interesting time zones?
|
mit
|
freyes/flask-hello-world
|
alembic/env.py
|
1
|
2048
|
from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = os.environ.get("DB_URI") #or config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
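# A minimal illustration (assuming a standard Alembic setup): running
# ``alembic upgrade head --sql`` takes the offline branch above and emits the
# migration SQL to stdout instead of executing it over a live connection.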
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
url=os.environ.get("DB_URI"),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
apache-2.0
|
rjshade/grpc
|
src/python/grpcio_health_checking/grpc_version.py
|
5
|
1658
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!!
VERSION='1.2.0.dev0'
|
bsd-3-clause
|
chris-hudson/impromptica
|
impromptica/probdata.py
|
1
|
7390
|
"""Probability data from various sources.
In this module, the "Essen corpus" refers to a corpus of 6,217 European folk
songs from the Essen Folksong Collection. The songs are available at
http://kern.ccarh.org/cgi-bin/ksbrowse?l=/essen and the list of songs used to
train the monophonic key and meter programs is published at
http://theory.esm.rochester.edu/temperley/music-prob/data/essen-train-list.
The "Kostka-Payne corpus" refers to 46 excerpts from the common-practice
repertoire, appearing in the workbook for the textbook "Tonal Harmony" by
Stefan Kostka and Dorothy Payne. The list of of the songs in the corpus is
published at http://theory.esm.rochester.edu/temperley/music-prob/data/kp-list.
The source code and data for "Music and Probability" (Temperley 2007), which
we use for much of our probalistic data, is published at
http://theory.esm.rochester.edu/temperley/music-prob/materials.html.
"""
import math
import numpy as np
import scipy.stats
from impromptica import settings
def build_distance_profile_data(standard_deviation):
"""Builds distance profile data using the given standard deviation (as a
    distance between note values).
The profile data is built from a Gaussian distribution, which is an
approximation for the actual data.
The result is a table, where the probability of note value j given
reference note value i is located at the index equal to the absolute
value of j - i.
"""
result = []
dist = scipy.stats.norm(0, standard_deviation)
for i in range(settings.MAX_NOTE + 1):
result.append(dist.pdf(i))
return result
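# A minimal usage sketch (illustrative; note values are assumed to be within
# settings.MAX_NOTE):
#
#     proximity = build_distance_profile_data(7.2)
#     p = proximity[abs(67 - 62)]   # likelihood of a note 5 semitones away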
def build_lognorm_tempo_profile_data(shape, scale, base_period, max_multiple):
"""Returns a log-Gaussian-derived likelihood table for periods of a
metrical level.
`base_period` is the time in seconds of the base period of which all other
period hypotheses will be integer multiples of.
`max_multiple` is the highest integer multiple by which the base period
will be multiplied by for period hypotheses.
"""
result = np.zeros(max_multiple)
dist = scipy.stats.lognorm(shape, scale=scale)
for i in range(1, max_multiple + 1):
result[i - 1] = dist.pdf(base_period * i)
# Divide the values in the table by the maximum if the maximum is greater
# than one.
max_value = np.max(result)
if max_value > 1:
result /= max_value
return result
def build_rayleigh_tempo_profile_data(scale, base_period, max_multiple):
"""Returns a Rayleigh-dervied likelihood table for periods of a metrical
level."""
result = np.zeros(max_multiple)
dist = scipy.stats.rayleigh(0, scale=scale)
for i in range(1, max_multiple + 1):
result[i - 1] = dist.pdf(base_period * i)
return result
def build_tempo_change_profile_data(
max_multiple,
standard_deviation=settings.TEMPO_CHANGE_STANDARD_DEVIATION):
"""Returns a table of the likelihood of transitions in tempo.
The table is indexed by the period of the new tempo and the period of the
    old tempo, where the periods are integer multiples of some base period and
    range from one to the given `max_multiple`. If the tempos are not measured
    in terms of a common base period, consider quantizing the ratio of the
    new and old tempos to some fraction and using that with the table generated
by this function.
The table is zero-indexed but the likelihood estimates start at a period
value of one, so the likelihood of a tempo change of a/b will be located at
result[a-1][b-1].
As currently implemented, the likelihood of a tempo change is symmetric
across inversion, that is, the likelihood of a tempo change of a/b is equal
to the likelihood of a tempo change of b/a.
"""
# Precompute the transition probabilities for all possible transitions
# between periods. This probability is modeled as a Gaussian distribution
# centered at one. A transition from a period of n to m is assigned
# likelihood according to the value of the Gaussian distribution at
# (log(m/n))^2.
dist = scipy.stats.norm(scale=standard_deviation)
result = np.zeros((max_multiple, max_multiple))
for i in range(max_multiple):
for j in range(i + 1):
try:
result[i][j] = result[j][i] = dist.pdf(
math.pow(math.log((j + 1.) / (i + 1.)), 2.))
except FloatingPointError:
result[i][j] = 0.
# Normalize the distribution so that the highest likelihood value is 1.
highest = np.max(result, axis=1).max()
for i in range(max_multiple):
result[i] /= highest
return result
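# A worked example of the indexing convention above (illustrative): the
# likelihood of moving from a period of 2 base units to 3 base units is
#
#     table = build_tempo_change_profile_data(max_multiple=4)
#     p = table[3 - 1][2 - 1]   # equal to table[2 - 1][3 - 1] by symmetry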
# This monophonic key profile generated from the Essen corpus provides
# probabilities of the offset of a note from the tonic note of a major key.
# This profile sums to 1 because it represents the probability that the next
# monophonic note is the given index offset from the tonic note of the key.
# Source: David Temperley. Music and Probability (Figure 4.7).
ESSEN_MAJOR_KEY_PROFILE_DATA = [
0.184,
0.001,
0.155,
0.003,
0.191,
0.109,
0.005,
0.214,
0.001,
0.078,
0.004,
0.055,
]
# This monophonic key profile generated from the Essen corpus provides
# probabilities of the offset of a note from the tonic note of a minor key.
# This profile sums to 1 because it represents the probability that the next
# monophonic note is the given index offset from the tonic note of the key.
# Source: David Temperley. Music and Probability (Figure 4.7).
ESSEN_MINOR_KEY_PROFILE_DATA = [
0.192,
0.005,
0.149,
0.179,
0.002,
0.144,
0.002,
0.201,
0.038,
0.012,
0.053,
0.022,
]
# This polyphonic key profile generated from the Kostka-Payne corpus provides
# probabilities of the offset of a note from the tonic note of a major key.
# This profile doesn't sum to 1 because we view notes as independent variables
# representing whether that note is present in a segment of the given key.
# Source: David Temperley. Music and Probability (Figure 6.4).
KP_MAJOR_KEY_PROFILE_DATA = [
0.748,
0.060,
0.488,
0.082,
0.670,
0.460,
0.096,
0.715,
0.104,
0.366,
0.057,
0.400
]
# This polyphonic key profile generated from the Kostka-Payne corpus provides
# probabilities of the offset of a note from the tonic note of a minor key.
# This profile doesn't sum to 1 because we view notes as independent variables
# representing whether that note is present in a segment of the given key.
# Source: David Temperley. Music and Probability (Figure 6.4).
KP_MINOR_KEY_PROFILE_DATA = [
0.712,
0.084,
0.474,
0.618,
0.049,
0.460,
0.105,
0.747,
0.404,
0.067,
0.133,
0.330
]
# This proximity profile generated from the Essen corpus provides
# probabilities of the distance of a note from the previous note.
# Source: David Temperley. Music and Probability (Table 4.1).
PROXIMITY_PROFILE_DATA = build_distance_profile_data(7.2)
# This range profile generated from the Essen corpus provides probabilities
# of the distance of a note from the central pitch. The central pitch is
# essentially the mean note value over a song.
# Source: David Temperley. Music and Probability (Table 4.1).
RANGE_PROFILE_DATA = build_distance_profile_data(29.0)
|
mit
|
adw0rd/lettuce
|
tests/integration/lib/Django-1.2.5/django/core/serializers/pyyaml.py
|
204
|
1948
|
"""
YAML serializer.
Requires PyYaml (http://pyyaml.org/), but that's checked for in __init__.
"""
from StringIO import StringIO
import decimal
import yaml
from django.db import models
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
class DjangoSafeDumper(yaml.SafeDumper):
def represent_decimal(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', str(data))
DjangoSafeDumper.add_representer(decimal.Decimal, DjangoSafeDumper.represent_decimal)
class Serializer(PythonSerializer):
"""
Convert a queryset to YAML.
"""
internal_use_only = False
def handle_field(self, obj, field):
# A nasty special case: base YAML doesn't support serialization of time
# types (as opposed to dates or datetimes, which it does support). Since
# we want to use the "safe" serializer for better interoperability, we
# need to do something with those pesky times. Converting 'em to strings
# isn't perfect, but it's better than a "!!python/time" type which would
# halt deserialization under any other language.
if isinstance(field, models.TimeField) and getattr(obj, field.name) is not None:
self._current[field.name] = str(getattr(obj, field.name))
else:
super(Serializer, self).handle_field(obj, field)
def end_serialization(self):
yaml.dump(self.objects, self.stream, Dumper=DjangoSafeDumper, **self.options)
def getvalue(self):
return self.stream.getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of YAML data.
"""
if isinstance(stream_or_string, basestring):
stream = StringIO(stream_or_string)
else:
stream = stream_or_string
for obj in PythonDeserializer(yaml.load(stream), **options):
yield obj
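# A minimal round-trip sketch (illustrative; `SomeModel` is a placeholder and
# the serializer is assumed to be registered under the "yaml" name):
#
#     from django.core import serializers
#     text = serializers.serialize("yaml", SomeModel.objects.all())
#     objs = list(serializers.deserialize("yaml", text))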
|
gpl-3.0
|
Tejas-Subramanya/RYU_MEC
|
ryu/services/protocols/bgp/operator/commands/show/rib.py
|
17
|
2482
|
from route_formatter_mixin import RouteFormatterMixin
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.command import STATUS_OK
from ryu.services.protocols.bgp.base import ActivityException
from ryu.services.protocols.bgp.operator.commands.responses import \
WrongParamResp
class RibBase(Command, RouteFormatterMixin):
supported_families = ['ipv4', 'ipv6', 'vpnv4', 'rtfilter', 'vpnv6']
class Rib(RibBase):
help_msg = 'show all routes for address family'
param_help_msg = '<address-family>'
command = 'rib'
def __init__(self, *args, **kwargs):
super(Rib, self).__init__(*args, **kwargs)
self.subcommands = {
'all': self.All}
def action(self, params):
if len(params) != 1 or params[0] not in self.supported_families:
return WrongParamResp()
from ryu.services.protocols.bgp.operator.internal_api \
import WrongParamError
try:
return CommandsResponse(
STATUS_OK,
self.api.get_single_rib_routes(params[0])
)
except WrongParamError as e:
return WrongParamResp(e)
@classmethod
def cli_resp_formatter(cls, resp):
if resp.status == STATUS_ERROR:
return RibBase.cli_resp_formatter(resp)
return cls._format_family_header() + cls._format_family(resp.value)
class All(RibBase):
help_msg = 'show routes for all RIBs'
command = 'all'
def action(self, params):
if len(params) != 0:
return WrongParamResp()
ret = {}
try:
for family in self.supported_families:
ret[family] = self.api.get_single_rib_routes(family)
return CommandsResponse(STATUS_OK, ret)
except ActivityException as e:
return CommandsResponse(STATUS_ERROR, e)
@classmethod
def cli_resp_formatter(cls, resp):
if resp.status == STATUS_ERROR:
return RibBase.cli_resp_formatter(resp)
ret = cls._format_family_header()
for family, data in resp.value.items():
ret += 'Family: {0}\n'.format(family)
ret += cls._format_family(data)
return ret
|
apache-2.0
|
HwisooSo/gemV-update
|
tests/testing/results.py
|
7
|
9555
|
#!/usr/bin/env python2
#
# Copyright (c) 2016 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from abc import ABCMeta, abstractmethod
import inspect
import pickle
import string
import sys
import xml.etree.cElementTree as ET
class UnitResult(object):
"""Results of a single test unit.
A test result can be one of:
- STATE_OK: Test ran successfully.
- STATE_SKIPPED: The test was skipped.
- STATE_ERROR: The test failed to run.
    - STATE_FAILURE: Test ran, but failed.
    The difference between STATE_ERROR and STATE_FAILURE is very
    subtle. In a gem5 context, STATE_ERROR would mean that gem5 failed
    to start or crashed, while STATE_FAILURE would mean that a test
failed (e.g., statistics mismatch).
"""
STATE_OK = 0
STATE_SKIPPED = 1
STATE_ERROR = 2
STATE_FAILURE = 3
state_names = {
STATE_OK : "OK",
STATE_SKIPPED : "SKIPPED",
STATE_ERROR : "ERROR",
STATE_FAILURE : "FAILURE",
}
def __init__(self, name, state, message="", stderr="", stdout="",
runtime=0.0):
self.name = name
self.state = state
self.message = message
self.stdout = stdout
self.stderr = stderr
self.runtime = runtime
def skipped(self):
return self.state == UnitResult.STATE_SKIPPED
def success(self):
return self.state == UnitResult.STATE_OK
def state_name(self):
return UnitResult.state_names[self.state]
def __nonzero__(self):
return self.success() or self.skipped()
def __str__(self):
state_name = self.state_name()
status = "%s: %s" % (state_name, self.message) if self.message else \
state_name
return "%s: %s" % (self.name, status)
class TestResult(object):
"""Results for from a single test consisting of one or more units."""
def __init__(self, name, run_results=[], verify_results=[]):
self.name = name
self.results = run_results + verify_results
self.run_results = run_results
self.verify_results = verify_results
def success(self):
return self.success_run() and self.success_verify()
def success_run(self):
return all([ r.success() for r in self.run_results ])
def success_verify(self):
return all([ r.success() for r in self.verify_results ])
def failed(self):
return self.failed_run() or self.failed_verify()
def failed_run(self):
return any([ not r for r in self.run_results ])
def failed_verify(self):
return any([ not r for r in self.verify_results ])
def skipped(self):
return all([ r.skipped() for r in self.run_results ])
def changed(self):
return self.success_run() and self.failed_verify()
def runtime(self):
return sum([ r.runtime for r in self.results ])
def __nonzero__(self):
return all([ r for r in self.results ])
class ResultFormatter(object):
__metaclass__ = ABCMeta
def __init__(self, fout=sys.stdout, verbose=False):
self.verbose = verbose
self.fout = fout
@abstractmethod
def dump_suites(self, suites):
pass
class Pickle(ResultFormatter):
"""Save test results as a binary using Python's pickle
functionality.
"""
def __init__(self, **kwargs):
super(Pickle, self).__init__(**kwargs)
def dump_suites(self, suites):
pickle.dump(suites, self.fout, pickle.HIGHEST_PROTOCOL)
class Text(ResultFormatter):
"""Output test results as text."""
def __init__(self, **kwargs):
super(Text, self).__init__(**kwargs)
def dump_suites(self, suites):
fout = self.fout
for suite in suites:
print >> fout, "--- %s ---" % suite.name
for t in suite.results:
print >> fout, "*** %s" % t
if t and not self.verbose:
continue
if t.message:
print >> fout, t.message
if t.stderr:
print >> fout, t.stderr
if t.stdout:
print >> fout, t.stdout
class TextSummary(ResultFormatter):
"""Output test results as a text summary"""
def __init__(self, **kwargs):
super(TextSummary, self).__init__(**kwargs)
def test_status(self, suite):
if suite.skipped():
return "SKIPPED"
elif suite.changed():
return "CHANGED"
elif suite:
return "OK"
else:
return "FAILED"
def dump_suites(self, suites):
fout = self.fout
for suite in suites:
status = self.test_status(suite)
print >> fout, "%s: %s" % (suite.name, status)
class JUnit(ResultFormatter):
"""Output test results as JUnit XML"""
def __init__(self, translate_names=True, **kwargs):
super(JUnit, self).__init__(**kwargs)
if translate_names:
self.name_table = string.maketrans(
"/.",
".-",
)
else:
self.name_table = string.maketrans("", "")
def convert_unit(self, x_suite, test):
x_test = ET.SubElement(x_suite, "testcase",
name=test.name,
time="%f" % test.runtime)
x_state = None
if test.state == UnitResult.STATE_OK:
pass
elif test.state == UnitResult.STATE_SKIPPED:
x_state = ET.SubElement(x_test, "skipped")
elif test.state == UnitResult.STATE_FAILURE:
x_state = ET.SubElement(x_test, "failure")
elif test.state == UnitResult.STATE_ERROR:
x_state = ET.SubElement(x_test, "error")
else:
assert False, "Unknown test state"
if x_state is not None:
if test.message:
x_state.set("message", test.message)
msg = []
if test.stderr:
msg.append("*** Standard Errror: ***")
msg.append(test.stderr)
if test.stdout:
msg.append("*** Standard Out: ***")
msg.append(test.stdout)
x_state.text = "\n".join(msg)
return x_test
def convert_suite(self, x_suites, suite):
x_suite = ET.SubElement(x_suites, "testsuite",
name=suite.name.translate(self.name_table),
time="%f" % suite.runtime())
errors = 0
failures = 0
skipped = 0
for test in suite.results:
if test.state != UnitResult.STATE_OK:
if test.state == UnitResult.STATE_SKIPPED:
skipped += 1
elif test.state == UnitResult.STATE_ERROR:
errors += 1
elif test.state == UnitResult.STATE_FAILURE:
failures += 1
x_test = self.convert_unit(x_suite, test)
x_suite.set("errors", str(errors))
x_suite.set("failures", str(failures))
x_suite.set("skipped", str(skipped))
x_suite.set("tests", str(len(suite.results)))
return x_suite
def convert_suites(self, suites):
x_root = ET.Element("testsuites")
for suite in suites:
self.convert_suite(x_root, suite)
return x_root
def dump_suites(self, suites):
et = ET.ElementTree(self.convert_suites(suites))
et.write(self.fout, encoding="UTF-8")
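# A minimal usage sketch (illustrative; `suites` is assumed to be a list of
# TestResult objects): write JUnit XML for consumption by a CI server.
#
#     with open("results.xml", "w") as f:
#         JUnit(fout=f).dump_suites(suites)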
|
bsd-3-clause
|
Avira/pootle
|
pootle/apps/contact/views.py
|
6
|
3679
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from django.core.urlresolvers import reverse
from django.views.generic import TemplateView
from contact_form.views import ContactFormView as OriginalContactFormView
from pootle.core.views import AjaxResponseMixin
from .forms import ContactForm, ReportForm
SUBJECT_TEMPLATE = 'Unit #%d (%s)'
BODY_TEMPLATE = '''
Unit: %s
Source: %s
Current translation: %s
Your question or comment:
'''
class ContactFormTemplateView(TemplateView):
template_name = 'contact_form/contact_form.html'
class ContactFormView(AjaxResponseMixin, OriginalContactFormView):
form_class = ContactForm
template_name = 'contact_form/xhr_contact_form.html'
def get_context_data(self, **kwargs):
ctx = super(ContactFormView, self).get_context_data(**kwargs)
# Provide the form action URL to use in the template that renders the
# contact dialog.
ctx.update({
'contact_form_url': reverse('pootle-contact-xhr'),
})
return ctx
def get_initial(self):
initial = super(ContactFormView, self).get_initial()
user = self.request.user
if user.is_authenticated():
initial.update({
'name': user.full_name,
'email': user.email,
})
return initial
def get_success_url(self):
# XXX: This is unused. We don't need a `/contact/sent/` URL, but
        # the parent :cls:`ContactView` requires us to set some value here
return reverse('pootle-contact')
class ReportFormView(ContactFormView):
form_class = ReportForm
def get_context_data(self, **kwargs):
ctx = super(ReportFormView, self).get_context_data(**kwargs)
# Provide the form action URL to use in the template that renders the
# contact dialog.
ctx.update({
'contact_form_url': reverse('pootle-contact-report-error'),
})
return ctx
def get_initial(self):
initial = super(ReportFormView, self).get_initial()
report = self.request.GET.get('report', False)
if report:
try:
from pootle_store.models import Unit
uid = int(report)
try:
unit = Unit.objects.select_related(
'store__translation_project__project',
).get(id=uid)
if unit.is_accessible_by(self.request.user):
unit_absolute_url = self.request.build_absolute_uri(
unit.get_translate_url()
)
initial.update({
'subject': SUBJECT_TEMPLATE % (
unit.id,
unit.store.translation_project.language.code
),
'body': BODY_TEMPLATE % (
unit_absolute_url,
unit.source,
unit.target
),
'report_email': unit.store.translation_project \
.project.report_email,
})
except Unit.DoesNotExist:
pass
except ValueError:
pass
return initial
|
gpl-3.0
|
fkorotkov/pants
|
tests/python/pants_test/base/test_build_root.py
|
11
|
2183
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from pants.base.build_root import BuildRoot
from pants.util.contextutil import environment_as, pushd, temporary_dir
from pants.util.dirutil import safe_mkdir, safe_mkdtemp, safe_rmtree, touch
class BuildRootTest(unittest.TestCase):
def setUp(self):
self.original_root = BuildRoot().path
self.new_root = os.path.realpath(safe_mkdtemp())
BuildRoot().reset()
def tearDown(self):
BuildRoot().reset()
safe_rmtree(self.new_root)
def test_via_set(self):
BuildRoot().path = self.new_root
self.assertEqual(self.new_root, BuildRoot().path)
def test_reset(self):
BuildRoot().path = self.new_root
BuildRoot().reset()
self.assertEqual(self.original_root, BuildRoot().path)
def test_via_pants_runner(self):
with temporary_dir() as root:
root = os.path.realpath(root)
touch(os.path.join(root, 'pants'))
with pushd(root):
self.assertEqual(root, BuildRoot().path)
BuildRoot().reset()
child = os.path.join(root, 'one', 'two')
safe_mkdir(child)
with pushd(child):
self.assertEqual(root, BuildRoot().path)
def test_temporary(self):
with BuildRoot().temporary(self.new_root):
self.assertEqual(self.new_root, BuildRoot().path)
self.assertEqual(self.original_root, BuildRoot().path)
def test_singleton(self):
self.assertEqual(BuildRoot().path, BuildRoot().path)
BuildRoot().path = self.new_root
self.assertEqual(BuildRoot().path, BuildRoot().path)
def test_not_found(self):
with temporary_dir() as root:
root = os.path.realpath(root)
with pushd(root):
self.assertRaises(BuildRoot.NotFoundError, lambda: BuildRoot().path)
def test_buildroot_override(self):
with temporary_dir() as root:
with environment_as(PANTS_BUILDROOT_OVERRIDE=root):
self.assertEqual(BuildRoot().path, root)
|
apache-2.0
|
with-git/tensorflow
|
tensorflow/contrib/layers/python/layers/summaries_test.py
|
112
|
3834
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import summaries as summaries_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SummariesTest(test.TestCase):
def test_summarize_scalar_tensor(self):
with self.test_session():
scalar_var = variables.Variable(1)
summary_op = summaries_lib.summarize_tensor(scalar_var)
self.assertEquals(summary_op.op.type, 'ScalarSummary')
def test_summarize_multidim_tensor(self):
with self.test_session():
tensor_var = variables.Variable([1, 2, 3])
summary_op = summaries_lib.summarize_tensor(tensor_var)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
def test_summarize_activation(self):
with self.test_session():
var = variables.Variable(1)
op = array_ops.identity(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 1)
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu(self):
with self.test_session():
var = variables.Variable(1)
op = nn_ops.relu(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 2)
self.assertIn(u'SummaryTest/zeros', names)
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_activation_relu6(self):
with self.test_session():
var = variables.Variable(1)
op = nn_ops.relu6(var, name='SummaryTest')
summary_op = summaries_lib.summarize_activation(op)
self.assertEquals(summary_op.op.type, 'HistogramSummary')
names = [op.op.name for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
self.assertEquals(len(names), 3)
self.assertIn(u'SummaryTest/zeros', names)
self.assertIn(u'SummaryTest/sixes', names)
self.assertIn(u'SummaryTest/activation', names)
def test_summarize_collection_regex(self):
with self.test_session():
var = variables.Variable(1)
array_ops.identity(var, name='Test1')
ops.add_to_collection('foo', array_ops.identity(var, name='Test2'))
ops.add_to_collection('foo', array_ops.identity(var, name='Foobar'))
ops.add_to_collection('foo', array_ops.identity(var, name='Test3'))
summaries = summaries_lib.summarize_collection('foo', r'Test[123]')
names = [op.op.name for op in summaries]
self.assertEquals(len(names), 2)
self.assertIn(u'Test2_summary', names)
self.assertIn(u'Test3_summary', names)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
iAmMrinal0/reddit
|
MessageArchiveSimple/messagearchivesimple.py
|
2
|
1943
|
#/u/Goldensights
import praw
import time
import datetime
'''USER CONFIG'''
USERNAME = ""
#This is the bot's Username. In order to send mail, it must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
PRINTFILE = 'messages.txt'
#This is the file, in the same directory as the .py file, where the messages are stored
SUBJECTLINE = "Newsletterly"
ITEMTYPE = 't4'
#The type of item to gather. t4 is a PM
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.uG
PASSWORD = bot.pG
USERAGENT = bot.aG
except ImportError:
pass
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def work():
unread = r.get_unread(limit=MAXPOSTS)
results = []
for message in unread:
if ITEMTYPE in message.fullname:
print(message.id, message.subject, end=" ")
if SUBJECTLINE.lower() in message.subject.lower():
print(message.body)
messagedate = datetime.datetime.utcfromtimestamp(message.created_utc)
messagedate = datetime.datetime.strftime(messagedate, "%B %d %Y %H:%M UTC")
results += [message.fullname + " : " + message.author.name, messagedate, message.body, "\n\n"]
else:
print()
message.mark_as_read()
logfile = open(PRINTFILE, "a")
for result in results:
print(result, file=logfile)
logfile.close()
while True:
try:
work()
except Exception as e:
        print('An error has occurred:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
time.sleep(WAIT)
|
mit
|
softlayer/softlayer-python
|
SoftLayer/CLI/order/item_list.py
|
2
|
5699
|
"""List package items."""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers import ordering
from SoftLayer.utils import lookup
COLUMNS = ['category', 'keyName', 'description', 'priceId']
COLUMNS_ITEM_PRICES = ['keyName', 'priceId', 'Hourly', 'Monthly', 'Restriction']
COLUMNS_ITEM_PRICES_LOCATION = ['keyName', 'priceId', 'Hourly', 'Monthly', 'Restriction']
@click.command()
@click.argument('package_keyname')
@click.option('--keyword', '-k', help="A word (or string) used to filter item names.")
@click.option('--category', '-c', help="Category code to filter items by")
@click.option('--prices', '-p', is_flag=True, help='Use --prices to list the server item prices, and to list the '
'Item Prices by location, add it to the --prices option using '
'location KeyName, e.g. --prices AMSTERDAM02')
@click.argument('location', required=False)
@environment.pass_env
def cli(env, package_keyname, keyword, category, prices, location=None):
"""List package items used for ordering.
The item keyNames listed can be used with `slcli order place` to specify
the items that are being ordered in the package.
.. Note::
Items with a numbered category, like disk0 or gpu0, can be included
multiple times in an order to match how many of the item you want to order.
::
# List all items in the VSI package
slcli order item-list CLOUD_SERVER
# List Ubuntu OSes from the os category of the Bare Metal package
slcli order item-list BARE_METAL_SERVER --category os --keyword ubuntu
"""
manager = ordering.OrderingManager(env.client)
tables = []
_filter = {'items': {}}
if keyword:
_filter['items']['description'] = {'operation': '*= %s' % keyword}
if category:
_filter['items']['categories'] = {'categoryCode': {'operation': '_= %s' % category}}
items = manager.list_items(package_keyname, filter=_filter)
sorted_items = sort_items(items)
categories = sorted_items.keys()
if prices:
_item_list_prices(categories, sorted_items, tables)
if location:
location_prices = manager.get_item_prices_by_location(location, package_keyname)
_location_item_prices(location_prices, location, tables)
else:
table_items_detail = formatting.Table(COLUMNS)
for category_name in sorted(categories):
for item in sorted_items[category_name]:
table_items_detail.add_row([category_name, item['keyName'], item['description'], get_price(item)])
tables.append(table_items_detail)
env.fout(formatting.listing(tables, separator='\n'))
def sort_items(items):
"""sorts the items into a dictionary of categories, with a list of items"""
sorted_items = {}
for item in items:
category = lookup(item, 'itemCategory', 'categoryCode')
if sorted_items.get(category) is None:
sorted_items[category] = []
sorted_items[category].append(item)
return sorted_items
def get_price(item):
"""Given an SoftLayer_Product_Item, returns its default price id"""
for price in item.get('prices', []):
if not price.get('locationGroupId'):
return price.get('id')
return 0
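# Illustrative (assumed) shape of an item returned by the ordering manager:
#   {'keyName': 'RAM_4_GB',
#    'itemCategory': {'categoryCode': 'ram'},
#    'prices': [{'id': 1234, 'locationGroupId': None, 'hourlyRecurringFee': '.1'}]}
# get_price() returns the first price without a locationGroupId (the default price).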
def _item_list_prices(categories, sorted_items, tables):
"""Add the item prices cost and capacity restriction to the table"""
table_prices = formatting.Table(COLUMNS_ITEM_PRICES)
for category in sorted(categories):
for item in sorted_items[category]:
for price in item['prices']:
if not price.get('locationGroupId'):
cr_max = get_item_price_data(price, 'capacityRestrictionMaximum')
cr_min = get_item_price_data(price, 'capacityRestrictionMinimum')
cr_type = get_item_price_data(price, 'capacityRestrictionType')
table_prices.add_row([item['keyName'], price['id'],
get_item_price_data(price, 'hourlyRecurringFee'),
get_item_price_data(price, 'recurringFee'),
"%s - %s %s" % (cr_min, cr_max, cr_type)])
tables.append(table_prices)
def get_item_price_data(price, item_attribute):
"""Given an SoftLayer_Product_Item_Price, returns its default price data"""
result = '-'
if item_attribute in price:
result = price[item_attribute]
return result
def _location_item_prices(location_prices, location, tables):
"""Add a location prices table to tables.
:param list location_prices : Location prices.
:param string location : Location.
:param list tables: Table list to add location prices table.
"""
location_prices_table = formatting.Table(COLUMNS_ITEM_PRICES_LOCATION, title="Item Prices for %s" % location)
location_prices_table.sortby = 'keyName'
location_prices_table.align = 'l'
for price in location_prices:
cr_max = get_item_price_data(price, 'capacityRestrictionMaximum')
cr_min = get_item_price_data(price, 'capacityRestrictionMinimum')
cr_type = get_item_price_data(price, 'capacityRestrictionType')
location_prices_table.add_row(
[price['item']['keyName'], price['id'],
get_item_price_data(price, 'hourlyRecurringFee'),
get_item_price_data(price, 'recurringFee'),
"%s - %s %s" % (cr_min, cr_max, cr_type)])
tables.append(location_prices_table)
|
mit
|
kdwink/intellij-community
|
python/lib/Lib/site-packages/django/core/management/commands/makemessages.py
|
73
|
15487
|
import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
from django.core.management.base import CommandError, BaseCommand
from django.utils.text import get_text_list
pythonize_re = re.compile(r'(?:^|\n)\s*//')
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
def handle_extensions(extensions=('html',)):
"""
Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ','').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
# we don't want *.py files here because of the way non-*.py files
# are handled in make_messages() (they are copied to file.ext.py files to
# trick xgettext to parse them as Python files)
return set([x for x in ext_list if x != '.py'])
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
return p.communicate()
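# Illustrative usage (output is version dependent; the shown value is an assumption):
#   out, err = _popen('xgettext --version')
#   # out typically starts with something like "xgettext (GNU gettext-tools) 0.18"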
def walk(root, topdown=True, onerror=None, followlinks=False):
"""
A version of os.walk that can follow symlinks for Python < 2.6
"""
for dirpath, dirnames, filenames in os.walk(root, topdown, onerror):
yield (dirpath, dirnames, filenames)
if followlinks:
for d in dirnames:
p = os.path.join(dirpath, d)
if os.path.islink(p):
for link_dirpath, link_dirnames, link_filenames in walk(p):
yield (link_dirpath, link_dirnames, link_filenames)
def is_ignored(path, ignore_patterns):
"""
Helper function to check if the given path should be ignored or not.
"""
for pattern in ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def find_files(root, ignore_patterns, verbosity, symlinks=False):
"""
Helper function to get all files in the given root.
"""
all_files = []
for (dirpath, dirnames, filenames) in walk(root, followlinks=symlinks):
for f in filenames:
norm_filepath = os.path.normpath(os.path.join(dirpath, f))
if is_ignored(norm_filepath, ignore_patterns):
if verbosity > 1:
sys.stdout.write('ignoring file %s in %s\n' % (f, dirpath))
else:
all_files.extend([(dirpath, f)])
all_files.sort()
return all_files
def copy_plural_forms(msgs, locale, domain, verbosity):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
import django
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
m = plural_forms_re.search(open(django_po, 'rU').read())
if m:
if verbosity > 1:
sys.stderr.write("copying plural forms: %s\n" % m.group('value'))
lines = []
seen = False
for line in msgs.split('\n'):
if not line and not seen:
line = '%s\n' % m.group('value')
seen = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
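# Illustrative (assumed) header line that plural_forms_re extracts from a .po file:
#   "Plural-Forms: nplurals=2; plural=(n != 1);\n"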
def make_messages(locale=None, domain='django', verbosity='1', all=False,
extensions=None, symlinks=False, ignore_patterns=[], no_wrap=False):
"""
Uses the locale directory from the Django SVN tree or an application/
project to process all files with translatable literals for the given
domain and locale.
"""
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
invoked_for_django = False
if os.path.isdir(os.path.join('conf', 'locale')):
localedir = os.path.abspath(os.path.join('conf', 'locale'))
invoked_for_django = True
elif os.path.isdir('locale'):
localedir = os.path.abspath('locale')
else:
raise CommandError("This script should be run from the Django SVN tree or your project or app tree. If you did indeed run it from the SVN checkout or your project or application, maybe you are just missing the conf/locale (in the django tree) or locale (for project and application) directory? It is not created automatically, you have to create it by hand if you want to enable i18n for your project or application.")
if domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")
if (locale is None and not all) or domain is None:
# backwards compatible error message
if not sys.argv[0].endswith("make-messages.py"):
message = "Type '%s help %s' for usage.\n" % (os.path.basename(sys.argv[0]), sys.argv[1])
else:
message = "usage: make-messages.py -l <language>\n or: make-messages.py -a\n"
raise CommandError(message)
# We require gettext version 0.15 or newer.
output = _popen('xgettext --version')[0]
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU gettext 0.15 or newer. You are using version %s, please upgrade your gettext toolset." % match.group())
languages = []
if locale is not None:
languages.append(locale)
elif all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
languages = [os.path.basename(l) for l in locale_dirs]
wrap = no_wrap and '--no-wrap' or ''
for locale in languages:
if verbosity > 0:
print "processing language", locale
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % domain)
potfile = os.path.join(basedir, '%s.pot' % domain)
if os.path.exists(potfile):
os.unlink(potfile)
for dirpath, file in find_files(".", ignore_patterns, verbosity, symlinks=symlinks):
file_base, file_ext = os.path.splitext(file)
if domain == 'djangojs' and file_ext in extensions:
if verbosity > 1:
sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
src = open(os.path.join(dirpath, file), "rU").read()
src = pythonize_re.sub('\n#', src)
thefile = '%s.py' % file
f = open(os.path.join(dirpath, thefile), "w")
try:
f.write(src)
finally:
f.close()
cmd = (
'xgettext -d %s -L Perl %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
'--from-code UTF-8 --add-comments=Translators -o - "%s"' % (
domain, wrap, os.path.join(dirpath, thefile)
)
)
msgs, errors = _popen(cmd)
if errors:
raise CommandError("errors happened while running xgettext on %s\n%s" % (file, errors))
old = '#: '+os.path.join(dirpath, thefile)[2:]
new = '#: '+os.path.join(dirpath, file)[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
if msgs:
f = open(potfile, 'ab')
try:
f.write(msgs)
finally:
f.close()
os.unlink(os.path.join(dirpath, thefile))
elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
thefile = file
orig_file = os.path.join(dirpath, file)
if file_ext in extensions:
src = open(orig_file, "rU").read()
thefile = '%s.py' % file
f = open(os.path.join(dirpath, thefile), "w")
try:
f.write(templatize(src, orig_file[2:]))
finally:
f.close()
if verbosity > 1:
sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
cmd = (
'xgettext -d %s -L Python %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=ugettext_noop --keyword=ugettext_lazy '
'--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
'--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
'--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
'--add-comments=Translators -o - "%s"' % (
domain, wrap, os.path.join(dirpath, thefile))
)
msgs, errors = _popen(cmd)
if errors:
raise CommandError("errors happened while running xgettext on %s\n%s" % (file, errors))
if thefile != file:
old = '#: '+os.path.join(dirpath, thefile)[2:]
new = '#: '+orig_file[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
if msgs:
f = open(potfile, 'ab')
try:
f.write(msgs)
finally:
f.close()
if thefile != file:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
msgs, errors = _popen('msguniq %s --to-code=utf-8 "%s"' %
(wrap, potfile))
if errors:
raise CommandError("errors happened while running msguniq\n%s" % errors)
f = open(potfile, 'w')
try:
f.write(msgs)
finally:
f.close()
if os.path.exists(pofile):
msgs, errors = _popen('msgmerge %s -q "%s" "%s"' %
(wrap, pofile, potfile))
if errors:
raise CommandError("errors happened while running msgmerge\n%s" % errors)
elif not invoked_for_django:
msgs = copy_plural_forms(msgs, locale, domain, verbosity)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % domain, "")
f = open(pofile, 'wb')
try:
f.write(msgs)
finally:
f.close()
os.unlink(potfile)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale',
help='Creates or updates the message files only for the given locale (e.g. pt_BR).'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Reexamines all source code and templates for new translation strings and updates all message files for all available languages.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: ".html", separate multiple extensions with commas, or use -e multiple times)',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines"),
)
help = "Runs over the entire source tree of the current directory and pulls out all strings marked for translation. It creates (or updates) a message file in the conf/locale (in the django tree) or locale (for project and application) directory."
requires_model_validation = False
can_import_settings = False
def handle(self, *args, **options):
if len(args) != 0:
raise CommandError("Command doesn't accept any arguments")
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
no_wrap = options.get('no_wrap')
if domain == 'djangojs':
extensions = handle_extensions(extensions or ['js'])
else:
extensions = handle_extensions(extensions or ['html'])
if verbosity > 1:
sys.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(extensions), 'and'))
make_messages(locale, domain, verbosity, process_all, extensions, symlinks, ignore_patterns, no_wrap)
|
apache-2.0
|
CydarLtd/ansible
|
lib/ansible/modules/system/lvol.py
|
63
|
17123
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <[email protected]>, Alexander Bulimov <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jeroen Hoekx (@jhoekx)"
- "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
- This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
vg:
description:
- The volume group this logical volume is part of.
required: true
lv:
description:
- The name of the logical volume.
required: true
size:
description:
- The size of the logical volume, according to lvcreate(8) --size, by
default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
Float values must begin with a digit.
Resizing using percentage values was not supported prior to 2.1.
state:
choices: [ "present", "absent" ]
default: present
description:
- Control if the logical volume exists. If C(present) and the
volume does not already exist then the C(size) option is required.
required: false
active:
version_added: "2.2"
choices: [ "yes", "no" ]
default: "yes"
description:
- Whether the volume is active and visible to the host.
required: false
force:
version_added: "1.5"
choices: [ "yes", "no" ]
default: "no"
description:
- Shrink or remove operations on volumes require this switch. Ensures that
filesystems never get corrupted/destroyed by mistake.
required: false
opts:
version_added: "2.0"
description:
- Free-form options to be passed to the lvcreate command
snapshot:
version_added: "2.1"
description:
- The name of the snapshot volume
required: false
pvs:
version_added: "2.2"
description:
- Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb
required: false
shrink:
version_added: "2.2"
description:
- Shrink if the current size is higher than the requested size
required: false
default: yes
notes:
- Filesystems on top of the volume are not resized.
'''
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol:
vg: firefly
lv: test
size: 512
# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
- lvol:
vg: firefly
lv: test
size: 512
pvs: /dev/sda,/dev/sdb
# Create cache pool logical volume
- lvol:
vg: firefly
lv: lvcache
size: 512m
opts: --type cache-pool
# Create a logical volume of 512g.
- lvol:
vg: firefly
lv: test
size: 512g
# Create a logical volume the size of all remaining space in the volume group
- lvol:
vg: firefly
lv: test
size: 100%FREE
# Create a logical volume with special options
- lvol:
vg: firefly
lv: test
size: 512g
opts: -r 16
# Extend the logical volume to 1024m.
- lvol:
vg: firefly
lv: test
size: 1024
# Extend the logical volume to consume all remaining space in the volume group
- lvol:
vg: firefly
lv: test
size: +100%FREE
# Extend the logical volume to take all remaining space of the PVs
- lvol:
vg: firefly
lv: test
size: 100%PVS
# Resize the logical volume to % of VG
- lvol:
vg: firefly
lv: test
size: 80%VG
force: yes
# Reduce the logical volume to 512m
- lvol:
vg: firefly
lv: test
size: 512
force: yes
# Set the logical volume to 512m and do not try to shrink if size is lower than current one
- lvol:
vg: firefly
lv: test
size: 512
shrink: no
# Remove the logical volume.
- lvol:
vg: firefly
lv: test
state: absent
force: yes
# Create a snapshot volume of the test logical volume.
- lvol:
vg: firefly
lv: test
snapshot: snap1
size: 100m
# Deactivate a logical volume
- lvol:
vg: firefly
lv: test
active: false
# Create a deactivated logical volume
- lvol:
vg: firefly
lv: test
size: 512g
active: false
'''
import re
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
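# e.g. mkversion(2, 2, 99) == 2002099, so versions compare correctly as plain ints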
def parse_lvs(data):
lvs = []
for line in data.splitlines():
parts = line.strip().split(';')
lvs.append({
'name': parts[0].replace('[','').replace(']',''),
'size': int(decimal_point.match(parts[1]).group(1)),
'active': (parts[2][4] == 'a')
})
return lvs
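# Illustrative (assumed) input line from `lvs --noheadings --nosuffix --separator ';'`:
#   "  test;512.00;-wi-a-----"
# parses to {'name': 'test', 'size': 512, 'active': True} (5th lv_attr char is 'a').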
def parse_vgs(data):
vgs = []
for line in data.splitlines():
parts = line.strip().split(';')
vgs.append({
'name': parts[0],
'size': int(decimal_point.match(parts[1]).group(1)),
'free': int(decimal_point.match(parts[2]).group(1)),
'ext_size': int(decimal_point.match(parts[3]).group(1))
})
return vgs
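# Illustrative (assumed) input line from `vgs --noheadings --separator ';'`:
#   "  firefly;10240.00m;512.00m;4.00m"
# parses to {'name': 'firefly', 'size': 10240, 'free': 512, 'ext_size': 4}.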
def get_lvm_version(module):
ver_cmd = module.get_bin_path("lvm", required=True)
rc, out, err = module.run_command("%s version" % (ver_cmd))
if rc != 0:
return None
m = re.search("LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
if not m:
return None
return mkversion(m.group(1), m.group(2), m.group(3))
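# Illustrative (assumed) `lvm version` output line matched by the regex above:
#   "  LVM version:     2.02.98(2) (2012-10-15)"  ->  mkversion(2, 2, 98) == 2002098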
def main():
module = AnsibleModule(
argument_spec=dict(
vg=dict(required=True),
lv=dict(required=True),
size=dict(type='str'),
opts=dict(type='str'),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
shrink=dict(type='bool', default='yes'),
active=dict(type='bool', default='yes'),
snapshot=dict(type='str', default=None),
pvs=dict(type='str')
),
supports_check_mode=True,
)
# Determine if the "--yes" option should be used
version_found = get_lvm_version(module)
if version_found is None:
module.fail_json(msg="Failed to get LVM version number")
version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
if version_found >= version_yesopt:
yesopt = "--yes"
else:
yesopt = ""
vg = module.params['vg']
lv = module.params['lv']
size = module.params['size']
opts = module.params['opts']
state = module.params['state']
force = module.boolean(module.params['force'])
shrink = module.boolean(module.params['shrink'])
active = module.boolean(module.params['active'])
size_opt = 'L'
size_unit = 'm'
snapshot = module.params['snapshot']
pvs = module.params['pvs']
if pvs is None:
pvs = ""
else:
pvs = pvs.replace(",", " ")
if opts is None:
opts = ""
# Add --test option when running in check-mode
if module.check_mode:
test_opt = ' --test'
else:
test_opt = ''
if size:
# LVCREATE(8) -l --extents option with percentage
if '%' in size:
size_parts = size.split('%', 1)
size_percent = int(size_parts[0])
if size_percent > 100:
module.fail_json(msg="Size percentage cannot be larger than 100%")
size_whole = size_parts[1]
if size_whole == 'ORIGIN':
module.fail_json(msg="Snapshot Volumes are not supported")
elif size_whole not in ['VG', 'PVS', 'FREE']:
module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
size_opt = 'l'
size_unit = ''
if '%' not in size:
# LVCREATE(8) -L --size option unit
if size[-1].lower() in 'bskmgtpe':
size_unit = size[-1].lower()
size = size[0:-1]
try:
float(size)
if not size[0].isdigit():
raise ValueError()
except ValueError:
module.fail_json(msg="Bad size specification of '%s'" % size)
# when no unit, megabytes by default
if size_opt == 'l':
unit = 'm'
else:
unit = size_unit
# Get information on volume group requested
vgs_cmd = module.get_bin_path("vgs", required=True)
rc, current_vgs, err = module.run_command(
"%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
vgs = parse_vgs(current_vgs)
this_vg = vgs[0]
# Get information on logical volume requested
lvs_cmd = module.get_bin_path("lvs", required=True)
rc, current_lvs, err = module.run_command(
"%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))
if rc != 0:
if state == 'absent':
module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
else:
module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
changed = False
lvs = parse_lvs(current_lvs)
if snapshot is None:
check_lv = lv
else:
check_lv = snapshot
for test_lv in lvs:
if test_lv['name'] == check_lv:
this_lv = test_lv
break
else:
this_lv = None
if state == 'present' and not size:
if this_lv is None:
module.fail_json(msg="No size given.")
msg = ''
if this_lv is None:
if state == 'present':
### create LV
lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
if snapshot is not None:
cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
else:
cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
rc, _, err = module.run_command(cmd)
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
else:
if state == 'absent':
### remove LV
if not force:
module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
lvremove_cmd = module.get_bin_path("lvremove", required=True)
rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=True)
else:
module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
elif not size:
pass
elif size_opt == 'l':
### Resize LV based on % value
tool = None
size_free = this_vg['free']
if size_whole == 'VG' or size_whole == 'PVS':
size_requested = size_percent * this_vg['size'] / 100
else: # size_whole == 'FREE':
size_requested = size_percent * this_vg['free'] / 100
if '+' in size:
size_requested += this_lv['size']
if this_lv['size'] < size_requested:
if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
tool = module.get_bin_path("lvextend", required=True)
else:
module.fail_json(
msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
(this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
)
elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
if size_requested == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
elif not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
else:
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
elif "not larger than existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
else:
### resize LV based on absolute values
tool = None
if int(size) > this_lv['size']:
tool = module.get_bin_path("lvextend", required=True)
elif shrink and int(size) < this_lv['size']:
if int(size) == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
if not force:
module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
else:
tool = module.get_bin_path("lvreduce", required=True)
tool = '%s %s' % (tool, '--force')
if tool:
cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
rc, out, err = module.run_command(cmd)
if "Reached maximum COW size" in out:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
elif rc == 0:
changed = True
elif "matches existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
elif "not larger than existing size" in err:
module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
else:
module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
if this_lv is not None:
if active:
lvchange_cmd = module.get_bin_path("lvchange", required=True)
rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
else:
lvchange_cmd = module.get_bin_path("lvchange", required=True)
rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
if rc == 0:
module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
else:
module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
PetrDlouhy/django
|
tests/m2m_intermediary/models.py
|
128
|
1260
|
"""
Many-to-many relationships via an intermediary table
For many-to-many relationships that need extra fields on the intermediary
table, use an intermediary model.
In this example, an ``Article`` can have multiple ``Reporter`` objects, and
each ``Article``-``Reporter`` combination (a ``Writer``) has a ``position``
field, which specifies the ``Reporter``'s position for the given article
(e.g. "Staff writer").
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
def __str__(self):
return self.headline
@python_2_unicode_compatible
class Writer(models.Model):
reporter = models.ForeignKey(Reporter)
article = models.ForeignKey(Article)
position = models.CharField(max_length=100)
def __str__(self):
return '%s (%s)' % (self.reporter, self.position)
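# Illustrative usage of the intermediary model (hypothetical data, assumes
# `from datetime import date`; not part of the test models):
#   r = Reporter.objects.create(first_name='John', last_name='Smith')
#   a = Article.objects.create(headline='Hello', pub_date=date(2005, 7, 27))
#   Writer.objects.create(reporter=r, article=a, position='Staff writer')
#   a.writer_set.all()   # -> [<Writer: John Smith (Staff writer)>]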
|
bsd-3-clause
|
murfz/Sick-Beard
|
lib/hachoir_parser/audio/8svx.py
|
90
|
3949
|
"""
8SVX audio file format parser (an IFF-based format; the chunk layout is shared with AIFF).
Author: Victor Stinner
Creation: 27 december 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
UInt16, UInt32, Float80, TimestampMac32,
RawBytes, NullBytes,
String, Enum, PascalString32)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler
from lib.hachoir_core.tools import alignValue
from lib.hachoir_parser.audio.id3 import ID3v2
CODEC_NAME = {
'ACE2': u"ACE 2-to-1",
'ACE8': u"ACE 8-to-3",
'MAC3': u"MAC 3-to-1",
'MAC6': u"MAC 6-to-1",
'NONE': u"None",
'sowt': u"Little-endian, no compression",
}
class Comment(FieldSet):
def createFields(self):
yield TimestampMac32(self, "timestamp")
yield PascalString32(self, "text")
def parseText(self):
yield String(self, "text", self["size"].value)
def parseID3(self):
yield ID3v2(self, "id3v2", size=self["size"].value*8)
def parseComment(self):
yield UInt16(self, "nb_comment")
for index in xrange(self["nb_comment"].value):
yield Comment(self, "comment[]")
def parseCommon(self):
yield UInt16(self, "nb_channel")
yield UInt32(self, "nb_sample")
yield UInt16(self, "sample_size")
yield Float80(self, "sample_rate")
yield Enum(String(self, "codec", 4, strip="\0", charset="ASCII"), CODEC_NAME)
def parseVersion(self):
yield TimestampMac32(self, "timestamp")
def parseSound(self):
yield UInt32(self, "offset")
yield UInt32(self, "block_size")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
class Chunk(FieldSet):
TAG_INFO = {
'COMM': ('common', "Common chunk", parseCommon),
'COMT': ('comment', "Comment", parseComment),
'NAME': ('name', "Name", parseText),
'AUTH': ('author', "Author", parseText),
'FVER': ('version', "Version", parseVersion),
'SSND': ('sound', "Sound data", parseSound),
'ID3 ': ('id3', "ID3", parseID3),
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["type"].value
if tag in self.TAG_INFO:
self._name, self._description, self._parser = self.TAG_INFO[tag]
else:
self._parser = None
def createFields(self):
yield String(self, "type", 4, "Signature (FORM)", charset="ASCII")
yield filesizeHandler(UInt32(self, "size"))
size = self["size"].value
if size:
if self._parser:
for field in self._parser(self):
yield field
if size % 2:
yield NullBytes(self, "padding", 1)
else:
yield RawBytes(self, "data", size)
class HeightSVX(Parser):
PARSER_TAGS = {
"id": "8svx",
"category": "audio",
"file_ext": ("8svx",),
"mime": (u"audio/x-aiff",),
"min_size": 12*8,
"description": "8SVX (audio) format"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "FORM":
return "Invalid signature"
if self.stream.readBytes(8*8, 4) != "8SVX":
return "Invalid type"
return True
def createFields(self):
yield String(self, "signature", 4, "Signature (FORM)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize"))
yield String(self, "type", 4, "Form type (AIFF or AIFC)", charset="ASCII")
while not self.eof:
yield Chunk(self, "chunk[]")
def createDescription(self):
return "8SVX audio (IFF FORM)"
def createContentSize(self):
return self["filesize"].value * 8
|
gpl-3.0
|
SOKP/external_chromium_org
|
third_party/cython/src/Cython/Compiler/Builtin.py
|
90
|
20358
|
#
# Builtin Definitions
#
from Symtab import BuiltinScope, StructOrUnionScope
from Code import UtilityCode
from TypeSlots import Signature
import PyrexTypes
import Options
# C-level implementations of builtin types, functions and methods
iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
py_set_utility_code = UtilityCode.load("pyset_compat", "Builtins.c")
builtin_utility_code = {
'set' : py_set_utility_code,
'frozenset' : py_set_utility_code,
}
# mapping from builtins to their C-level equivalents
class _BuiltinOverride(object):
def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
utility_code=None, sig=None, func_type=None,
is_strict_signature=False, builtin_return_type=None):
self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
self.args, self.ret_type = args, ret_type
self.func_type, self.sig = func_type, sig
self.builtin_return_type = builtin_return_type
self.is_strict_signature = is_strict_signature
self.utility_code = utility_code
def build_func_type(self, sig=None, self_arg=None):
if sig is None:
sig = Signature(self.args, self.ret_type)
sig.exception_check = False # not needed for the current builtins
func_type = sig.function_type(self_arg)
if self.is_strict_signature:
func_type.is_strict_signature = True
if self.builtin_return_type:
func_type.return_type = builtin_types[self.builtin_return_type]
return func_type
class BuiltinAttribute(object):
def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
self.py_name = py_name
self.cname = cname or py_name
self.field_type_name = field_type_name # can't do the lookup before the type is declared!
self.field_type = field_type
def declare_in_type(self, self_type):
if self.field_type_name is not None:
# lazy type lookup
field_type = builtin_scope.lookup(self.field_type_name).type
else:
field_type = self.field_type or PyrexTypes.py_object_type
entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
entry.is_variable = True
class BuiltinFunction(_BuiltinOverride):
def declare_in_scope(self, scope):
func_type, sig = self.func_type, self.sig
if func_type is None:
func_type = self.build_func_type(sig)
scope.declare_builtin_cfunction(self.py_name, func_type, self.cname,
self.py_equiv, self.utility_code)
class BuiltinMethod(_BuiltinOverride):
def declare_in_type(self, self_type):
method_type, sig = self.func_type, self.sig
if method_type is None:
# override 'self' type (first argument)
self_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
self_arg.not_none = True
self_arg.accept_builtin_subtypes = True
method_type = self.build_func_type(sig, self_arg)
self_type.scope.declare_builtin_cfunction(
self.py_name, method_type, self.cname, utility_code=self.utility_code)
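# The one-letter arg/return codes used below come from TypeSlots.Signature.
# Assumed meanings: 'O' = object, 'T' = the builtin type itself (self),
# 'b' = bint, 'r' = int return code, 'z' = Py_ssize_t, 'd' = double,
# 'f' = float, 'h' = Py_hash_t.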
builtin_function_table = [
# name, args, return, C API func, py equiv = "*"
BuiltinFunction('abs', "d", "d", "fabs",
is_strict_signature = True),
BuiltinFunction('abs', "f", "f", "fabsf",
is_strict_signature = True),
BuiltinFunction('abs', None, None, "__Pyx_abs_int",
utility_code = UtilityCode.load("abs_int", "Builtins.c"),
func_type = PyrexTypes.CFuncType(
PyrexTypes.c_uint_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_int_type, None)
],
is_strict_signature = True)),
BuiltinFunction('abs', None, None, "__Pyx_abs_long",
utility_code = UtilityCode.load("abs_long", "Builtins.c"),
func_type = PyrexTypes.CFuncType(
PyrexTypes.c_ulong_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_long_type, None)
],
is_strict_signature = True)),
BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
func_type = PyrexTypes.CFuncType(
PyrexTypes.c_ulonglong_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
],
is_strict_signature = True)),
BuiltinFunction('abs', "O", "O", "PyNumber_Absolute"),
BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check",
utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
#('chr', "", "", ""),
#('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
#('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start)
BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"),
BuiltinFunction('dir', "O", "O", "PyObject_Dir"),
BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"),
BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals",
utility_code = pyexec_globals_utility_code),
BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2",
utility_code = pyexec_utility_code),
BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3",
utility_code = pyexec_utility_code),
#('eval', "", "", ""),
#('execfile', "", "", ""),
#('filter', "", "", ""),
BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr",
utility_code=getattr3_utility_code), # Pyrex legacy
BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3",
utility_code=getattr3_utility_code),
BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
utility_code=getattr_utility_code),
BuiltinFunction('hasattr', "OO", "b", "PyObject_HasAttr"),
BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
#('hex', "", "", ""),
#('id', "", "", ""),
#('input', "", "", ""),
BuiltinFunction('intern', "O", "O", "__Pyx_Intern",
utility_code = UtilityCode.load("Intern", "Builtins.c")),
BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"),
BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"),
BuiltinFunction('iter', "OO", "O", "PyCallIter_New"),
BuiltinFunction('iter', "O", "O", "PyObject_GetIter"),
BuiltinFunction('len', "O", "z", "PyObject_Length"),
BuiltinFunction('locals', "", "O", "__pyx_locals"),
#('map', "", "", ""),
#('max', "", "", ""),
#('min', "", "", ""),
BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next",
utility_code = iter_next_utility_code), # not available in Py2 => implemented here
BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2",
utility_code = iter_next_utility_code), # not available in Py2 => implemented here
#('oct', "", "", ""),
#('open', "ss", "O", "PyFile_FromString"), # not in Py3
#('ord', "", "", ""),
BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"),
BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2",
utility_code = UtilityCode.load("pow2", "Builtins.c")),
#('range', "", "", ""),
#('raw_input', "", "", ""),
#('reduce', "", "", ""),
BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"),
BuiltinFunction('repr', "O", "O", "PyObject_Repr", builtin_return_type='str'),
#('round', "", "", ""),
BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"),
#('sum', "", "", ""),
#('type', "O", "O", "PyObject_Type"),
#('unichr', "", "", ""),
#('unicode', "", "", ""),
#('vars', "", "", ""),
#('zip', "", "", ""),
# Can't do these easily until we have builtin type entries.
#('typecheck', "OO", "i", "PyObject_TypeCheck", False),
#('issubtype', "OO", "i", "PyType_IsSubtype", False),
# Put in namespace append optimization.
BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"),
]
if not Options.old_style_globals:
builtin_function_table.append(
BuiltinFunction('globals', "", "O", "__Pyx_Globals",
utility_code=globals_utility_code))
# Builtin types
# bool
# buffer
# classmethod
# dict
# enumerate
# file
# float
# int
# list
# long
# object
# property
# slice
# staticmethod
# super
# str
# tuple
# type
# xrange
builtin_types_table = [
("type", "PyType_Type", []),
# This conflicts with the C++ bool type, and unfortunately
# C++ is too liberal about PyObject* <-> bool conversions,
# resulting in unintuitive runtime behavior and segfaults.
# ("bool", "PyBool_Type", []),
("int", "PyInt_Type", []),
("long", "PyLong_Type", []),
("float", "PyFloat_Type", []),
("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
]),
("basestring", "PyBaseString_Type", [
BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("bytearray", "PyByteArray_Type", [
]),
("bytes", "PyBytes_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("str", "PyString_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join",
builtin_return_type='basestring',
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"),
BuiltinMethod("join", "TO", "T", "PyUnicode_Join"),
]),
("tuple", "PyTuple_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
]),
("list", "PyList_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("insert", "TzO", "r", "PyList_Insert"),
BuiltinMethod("reverse", "T", "r", "PyList_Reverse"),
BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append",
utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
BuiltinMethod("extend", "TO", "r", "__Pyx_PyList_Extend",
utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
]),
("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"),
BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"),
BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items",
utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys",
utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values",
utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems",
utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys",
utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues",
utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems",
utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys",
utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues",
utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear",
utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
BuiltinMethod("copy", "T", "T", "PyDict_Copy")]),
("slice", "PySlice_Type", [BuiltinAttribute('start'),
BuiltinAttribute('stop'),
BuiltinAttribute('step'),
]),
# ("file", "PyFile_Type", []), # not in Py3
("set", "PySet_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("clear", "T", "r", "PySet_Clear",
utility_code = py_set_utility_code),
# discard() and remove() have a special treatment for unhashable values
# BuiltinMethod("discard", "TO", "r", "PySet_Discard",
# utility_code = py_set_utility_code),
BuiltinMethod("add", "TO", "r", "PySet_Add",
utility_code = py_set_utility_code),
BuiltinMethod("pop", "T", "O", "PySet_Pop",
utility_code = py_set_utility_code)]),
("frozenset", "PyFrozenSet_Type", []),
]
types_that_construct_their_instance = set([
# some builtin types do not always return an instance of
# themselves - these do:
'type', 'bool', 'long', 'float', 'complex',
'bytes', 'unicode', 'bytearray',
'tuple', 'list', 'dict', 'set', 'frozenset'
# 'str', # only in Py3.x
# 'file', # only in Py2.x
])
builtin_structs_table = [
('Py_buffer', 'Py_buffer',
[("buf", PyrexTypes.c_void_ptr_type),
("obj", PyrexTypes.py_object_type),
("len", PyrexTypes.c_py_ssize_t_type),
("itemsize", PyrexTypes.c_py_ssize_t_type),
("readonly", PyrexTypes.c_bint_type),
("ndim", PyrexTypes.c_int_type),
("format", PyrexTypes.c_char_ptr_type),
("shape", PyrexTypes.c_py_ssize_t_ptr_type),
("strides", PyrexTypes.c_py_ssize_t_ptr_type),
("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type),
("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)),
("internal", PyrexTypes.c_void_ptr_type),
]),
('Py_complex', 'Py_complex',
[('real', PyrexTypes.c_double_type),
('imag', PyrexTypes.c_double_type),
])
]
# set up builtin scope
builtin_scope = BuiltinScope()
def init_builtin_funcs():
for bf in builtin_function_table:
bf.declare_in_scope(builtin_scope)
builtin_types = {}
def init_builtin_types():
global builtin_types
for name, cname, methods in builtin_types_table:
utility = builtin_utility_code.get(name)
if name == 'frozenset':
objstruct_cname = 'PySetObject'
elif name == 'bool':
objstruct_cname = None
else:
objstruct_cname = 'Py%sObject' % name.capitalize()
the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname)
builtin_types[name] = the_type
for method in methods:
method.declare_in_type(the_type)
def init_builtin_structs():
for name, cname, attribute_types in builtin_structs_table:
scope = StructOrUnionScope(name)
for attribute_name, attribute_type in attribute_types:
scope.declare_var(attribute_name, attribute_type, None,
attribute_name, allow_pyobject=True)
builtin_scope.declare_struct_or_union(
name, "struct", scope, 1, None, cname = cname)
def init_builtins():
init_builtin_structs()
init_builtin_types()
init_builtin_funcs()
builtin_scope.declare_var(
'__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type),
pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True)
global list_type, tuple_type, dict_type, set_type, frozenset_type
global bytes_type, str_type, unicode_type, basestring_type, slice_type
global float_type, bool_type, type_type, complex_type, bytearray_type
type_type = builtin_scope.lookup('type').type
list_type = builtin_scope.lookup('list').type
tuple_type = builtin_scope.lookup('tuple').type
dict_type = builtin_scope.lookup('dict').type
set_type = builtin_scope.lookup('set').type
frozenset_type = builtin_scope.lookup('frozenset').type
slice_type = builtin_scope.lookup('slice').type
bytes_type = builtin_scope.lookup('bytes').type
str_type = builtin_scope.lookup('str').type
unicode_type = builtin_scope.lookup('unicode').type
basestring_type = builtin_scope.lookup('basestring').type
bytearray_type = builtin_scope.lookup('bytearray').type
float_type = builtin_scope.lookup('float').type
bool_type = builtin_scope.lookup('bool').type
complex_type = builtin_scope.lookup('complex').type
init_builtins()
|
bsd-3-clause
|
fhaoquan/kbengine
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/colorama/initialise.py
|
484
|
1297
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = sys.stdout
orig_stderr = sys.stderr
wrapped_stdout = sys.stdout
wrapped_stderr = sys.stderr
atexit_done = False
def reset_all():
AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
if not wrap and any([autoreset, convert, strip]):
raise ValueError('wrap=False conflicts with any other arg=True')
global wrapped_stdout, wrapped_stderr
sys.stdout = wrapped_stdout = \
wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
sys.stderr = wrapped_stderr = \
wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
global atexit_done
if not atexit_done:
atexit.register(reset_all)
atexit_done = True
def deinit():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def reinit():
sys.stdout = wrapped_stdout
sys.stderr = wrapped_stderr
def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrap:
wrapper = AnsiToWin32(stream,
convert=convert, strip=strip, autoreset=autoreset)
if wrapper.should_wrap():
stream = wrapper.stream
return stream
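# Illustrative usage (a minimal sketch of the usual colorama pattern):
#   init(autoreset=True)           # wrap stdout/stderr; converts ANSI on Windows
#   print('\033[31m' + 'red text' + '\033[0m')
#   deinit()                       # restore the original, unwrapped streams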
|
lgpl-3.0
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/future/backports/http/cookies.py
|
78
|
21569
|
####
# Copyright 2000 by Timothy O'Malley <[email protected]>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <[email protected]>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell ([email protected]) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
http.cookies module ported to python-future from Py3.3
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import chr, dict, int, str
from future.utils import PY2, as_native_str
#
# Import our required modules
#
import re
re.ASCII = 0 # for py2 compatibility
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding backslash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_Translator = {
'\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
'\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
'\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
'\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
'\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
'\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
'\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
'\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
'\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
'\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
'\036' : '\\036', '\037' : '\\037',
# Because of the way browsers really handle cookies (as opposed
# to what the RFC says) we also encode , and ;
',' : '\\054', ';' : '\\073',
'"' : '\\"', '\\' : '\\\\',
'\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
'\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
'\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
'\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
'\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
'\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
'\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
'\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
'\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
'\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
'\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
'\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
'\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
'\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
'\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
'\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
'\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
'\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
'\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
'\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
'\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
'\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
'\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
'\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
'\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
'\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
'\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
'\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
'\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
'\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
'\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
'\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
'\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
'\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
'\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
'\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
'\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
'\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
'\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
'\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
'\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
'\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
'\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
}
def _quote(str, LegalChars=_LegalChars):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if all(c in LegalChars for c in str):
return str
else:
return '"' + _nulljoin(_Translator.get(s, s) for s in str) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(mystr):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(mystr) < 2:
return mystr
if mystr[0] != '"' or mystr[-1] != '"':
return mystr
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
mystr = mystr[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(mystr)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(mystr, i)
q_match = _QuotePatt.search(mystr, i)
if not o_match and not q_match: # Neither matched
res.append(mystr[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(mystr[i:k])
res.append(mystr[k+1])
i = k + 2
else: # OctalPatt matched
res.append(mystr[i:j])
res.append(chr(int(mystr[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
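# Round-trip sketch (illustrative, not part of the original module): a comma is
# not in _LegalChars, so the value is double-quoted and ',' becomes '\054'.
# >>> _quote('cookie,value')
# '"cookie\\054value"'
# >>> _unquote('"cookie\\054value"')
# 'cookie,value'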
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
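# For example, a call such as _getdate(future=3600) (one hour from now) yields
# a string shaped like 'Wed, 01 Jan 2020 13:00:00 GMT' -- the exact value
# depends on the current time, so this is only an illustration.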
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "secure",
"httponly" : "httponly",
"version" : "Version",
}
_flags = set(['secure', 'httponly'])
def __init__(self):
# Set defaults
self.key = self.value = self.coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
def __setitem__(self, K, V):
K = K.lower()
        if K not in self._reserved:
raise CookieError("Invalid Attribute %s" % K)
dict.__setitem__(self, K, V)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
# First we verify that the key isn't a reserved word
# Second we make sure it only contains legal characters
if key.lower() in self._reserved:
raise CookieError("Attempt to set a reserved key: %s" % key)
if any(c not in LegalChars for c in key):
raise CookieError("Illegal key value: %s" % key)
# It's a good key, so save it.
self.key = key
self.value = val
self.coded_value = coded_val
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
@as_native_str()
def __repr__(self):
if PY2 and isinstance(self.value, unicode):
val = str(self.value) # make it a newstr to remove the u prefix
else:
val = self.value
return '<%s: %s=%s>' % (self.__class__.__name__,
str(self.key), repr(val))
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key == "secure":
append(str(self._reserved[key]))
elif key == "httponly":
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
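# Illustratively, the compiled pattern below tokenizes a header fragment such
# as 'chips=ahoy; vienna=finger' into successive (key, val) matches:
# ('chips', 'ahoy'), then ('vienna', 'finger').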
_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
(?P<key> # Start of group 'key'
""" + _LegalCharsPatt + r"""+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
""" + _LegalCharsPatt + r"""* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
@as_native_str()
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
if PY2 and isinstance(value.value, unicode):
val = str(value.value) # make it a newstr to remove the u prefix
else:
val = value.value
l.append('%s=%s' % (str(key), repr(val)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, mystr, patt=_CookiePattern):
i = 0 # Our starting point
n = len(mystr) # Length of string
M = None # current morsel
while 0 <= i < n:
# Start looking for a cookie
match = patt.search(mystr, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
# Parse the key, value in case it's metainfo
if key[0] == "$":
# We ignore attributes which pertain to the cookie
# mechanism as a whole. See RFC 2109.
# (Does anyone care?)
if M:
M[key[1:]] = value
elif key.lower() in Morsel._reserved:
if M:
if value is None:
if key.lower() in Morsel._flags:
M[key] = True
else:
M[key] = _unquote(value)
elif value is not None:
rval, cval = self.value_decode(value)
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
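# A hedged round-trip sketch of the SimpleCookie codecs (not in the original):
# >>> SimpleCookie().value_encode('a,b')
# ('a,b', '"a\\054b"')
# >>> SimpleCookie().value_decode('"a\\054b"')
# ('a,b', '"a\\054b"')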
|
mit
|
NicovincX2/Python-3.5
|
Algorithmique/Algorithme/Algorithme de tri/Tri par fusion (Merge sort)/merge_sort2.py
|
1
|
1238
|
# -*- coding: utf-8 -*-
""" Merge Sort
----------
Uses divide and conquer to recursively divide and sort the list
Time Complexity: O(n log n)
Space Complexity: O(n) Auxiliary
Stable: Yes
Pseudo Code: CLRS. Introduction to Algorithms. 3rd ed. """
import os
def merge(left, right):
"""
Takes two sorted sub lists and merges them in to a single sorted sub list
and returns it.
:param left: A list of sorted integers
:param right: A list of sorted integers
:rtype: A list of sorted integers
"""
result = []
n, m = 0, 0
while n < len(left) and m < len(right):
if left[n] <= right[m]:
result.append(left[n])
n += 1
else:
result.append(right[m])
m += 1
result += left[n:]
result += right[m:]
return result
def sort(seq):
"""
Takes a list of integers and sorts them in ascending order. This sorted
list is then returned.
:param seq: A list of integers
:rtype: A list of sorted integers
"""
if len(seq) <= 1:
return seq
middle = int(len(seq) // 2)
left = sort(seq[:middle])
right = sort(seq[middle:])
return merge(left, right)
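# Illustrative doctest-style check (assumed example, not from the original file):
# >>> sort([38, 27, 43, 3, 9, 82, 10])
# [3, 9, 10, 27, 38, 43, 82]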
if __name__ == "__main__":
    os.system("pause")  # 'pause' is a Windows shell builtin; it does nothing useful elsewhere
|
gpl-3.0
|
esteinig/netviewP
|
program/linux/0.7/netview.py
|
1
|
28716
|
#!/usr/bin/env python
# NetView P v.0.7 - Linux
# Dependencies: PLINK
# Eike Steinig
# Zenger Lab, JCU
# https://github.com/esteinig/netview
import os
import time
import shutil
import argparse
import subprocess
import numpy as np
import multiprocessing as mp
import scipy.sparse.csgraph as csg
import scipy.spatial.distance as sd
from sklearn.neighbors import NearestNeighbors
def main():
commands = CommandLine()
dat = Data()
dat.prefix = commands.arg_dict['prefix']
dat.ploidy = commands.arg_dict['ploidy']
dat.missing = commands.arg_dict['missing']
if commands.arg_dict['visual']:
print('\nGenerated node attribute files only.\n')
dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
dat.writeData(f='attributes')
makeProject(commands.arg_dict['project'] + '_attributes', commands.arg_dict['prefix'])
        exit(0)
print()
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + " NETVIEW P v.0.7 ")
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + "File =", commands.arg_dict['data_file'].upper())
if commands.arg_dict['plink']:
dat.filetype = 'plink'
dat.readData(commands.arg_dict['data_file'], f='plink', sep=commands.arg_dict['sep'])
elif commands.arg_dict['snps']:
dat.filetype = 'snps'
dat.readData(commands.arg_dict['data_file'], f='snp_mat', sep=commands.arg_dict['sep'])
else:
dat.filetype = 'dist'
dat.readData(commands.arg_dict['data_file'], f='matrix', sep=commands.arg_dict['sep'])
dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
if dat.ploidy == 'diploid':
nsnp = dat.nSNP//2
else:
nsnp = dat.nSNP
print(get_time() + "\t" + "N =", str(dat.n).upper())
print(get_time() + "\t" + "SNPs =", str(nsnp).upper())
print(get_time() + "\t" + "Ploidy =", dat.ploidy.upper())
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + "Quality Control =", str(commands.arg_dict['qc']).upper())
pipeline = Analysis(dat)
qc = False
if commands.arg_dict['qc'] and pipeline.data.filetype != 'dist':
qc_params = {'--mind': commands.arg_dict['mind'],
'--geno': commands.arg_dict['geno'],
'--maf': commands.arg_dict['maf'],
'--hwe': commands.arg_dict['hwe']}
pipeline.runPLINK(qc_parameters=qc_params, quality=True)
qc = True
if commands.arg_dict['mat'] and pipeline.data.filetype != 'dist':
pipeline.getDistance(distance=commands.arg_dict['distance'])
pipeline.data.writeData(file=commands.arg_dict['prefix'] + '_mat.dist', f='matrix')
makeProject(commands.arg_dict['project'] + '_dist', commands.arg_dict['prefix'])
print(get_time() + "\t" + "---------------------------------\n")
        exit(0)
elif commands.arg_dict['mat'] and pipeline.data.filetype == 'dist':
print('\nError. Input is already a Distance Matrix.\n')
exit(1)
if not commands.arg_dict['off']:
if pipeline.data.filetype != 'dist':
pipeline.getDistance(distance=commands.arg_dict['distance'])
pipeline.runNetView(tree=commands.arg_dict['tree'], start=commands.arg_dict['start'],
stop=commands.arg_dict['stop'], step=commands.arg_dict['step'],
algorithm=commands.arg_dict['algorithm'])
if qc:
pipeline.updateNodeAttributes(commands.arg_dict['attribute_file'])
pipeline.data.writeData(f='attributes')
makeProject(commands.arg_dict['project'], commands.arg_dict['prefix'])
print(get_time() + "\t" + "---------------------------------\n")
def makeProject(project, prefix):
cwd = os.getcwd()
project_path = os.path.realpath(os.path.join(os.getcwd(), project))
plink_path = os.path.realpath(os.path.join(project_path, 'plink'))
network_path = os.path.realpath(os.path.join(project_path, 'networks'))
other_path = os.path.realpath(os.path.join(project_path, 'other'))
node_path = os.path.realpath(os.path.join(project_path, 'nodes'))
if os.path.exists(project_path):
shutil.rmtree(project_path)
architecture = [project_path, plink_path, network_path, other_path, node_path]
for directory in architecture:
try:
os.makedirs(directory)
except OSError:
if not os.path.isdir(directory):
raise
for name in os.listdir(cwd):
if name.endswith('.edges'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, network_path)
if name.endswith('.dist'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, other_path)
if name.endswith('.nat'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, node_path)
elif name.startswith(prefix + '_plink_in'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
os.remove(pathname)
elif name.startswith(prefix + '_plink'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, plink_path)
elif name.endswith('_qc.csv'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, other_path)
#### Functions for Multiprocessing ####
def netview(matrix, k, mst, algorithm, tree):
nbrs = NearestNeighbors(n_neighbors=k+1, algorithm=algorithm).fit(matrix)
adj_knn = nbrs.kneighbors_graph(matrix).toarray()
np.fill_diagonal(adj_knn, 0)
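    # Mutual kNN: keep an edge (i, j) only if i is among j's k nearest
    # neighbours and vice versa; the elementwise comparison with the
    # transpose below implements that symmetry test.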
adj_mknn = (adj_knn == adj_knn.T) * adj_knn
if tree:
adj = mst + adj_mknn
else:
adj = adj_mknn
adjacency = np.tril(adj)
mst_edges = np.argwhere(adjacency < 1)
adjacency[adjacency > 0] = 1.
edges = np.argwhere(adjacency != 0)
weights = matrix[edges[:, 0], edges[:, 1]]
return [k, edges, weights, adjacency, mst_edges]
def netview_callback(k):
print(get_time() + "\t" + ' k=' + str(k[0]))
def get_time():
return time.strftime("[%H:%M:%S]")
#### Command Line Module ####
class CommandLine:
def __init__(self):
self.parser = argparse.ArgumentParser(description='NetView P v0.7', add_help=True)
self.setParser()
self.args = self.parser.parse_args()
self.arg_dict = vars(self.args)
def setParser(self):
data_type = self.parser.add_mutually_exclusive_group(required=True)
# Required Options
self.parser.add_argument('-f', dest='data_file', required=True, type=str,
help="Name of Data File")
data_type.add_argument('-p', dest='plink', action='store_true',
help="PLINK format (.ped/.map)")
data_type.add_argument('-s', dest='snps', action='store_true',
help="SNP matrix (N x SNPs)")
data_type.add_argument('-m', dest='dist', action='store_true',
help="Distance matrix (N x N)")
self.parser.add_argument('-a', dest='attribute_file', default='', type=str, required=True,
help="Node attribute file (.csv)")
# MAIN Options
self.parser.add_argument('--quality', dest='qc', action='store_true', default=False,
help="Quality control in PLINK (OFF)")
self.parser.add_argument('--distance', dest='distance', default='asd', type=str,
help="Distance measure for SNPs: hamming, asd, correlation... (asd)")
self.parser.add_argument('--algorithm', dest='algorithm', default='auto', type=str,
                                 help="Algorithm for NN: auto, ball_tree, kd_tree, brute (auto)")
self.parser.add_argument('--mst-off', dest='tree', action='store_false', default=True,
help="Disable minimum spanning tree (OFF)")
self.parser.add_argument('--ploidy', dest='ploidy', default='diploid', type=str,
                                 help="Set ploidy: haploid, diploid (diploid)")
self.parser.add_argument('--missing', dest='missing', default='0', type=str,
help="Set missing character (0)")
self.parser.add_argument('--prefix', dest='prefix', default='project', type=str,
help="Set prefix (project)")
self.parser.add_argument('--project', dest='project', default=time.strftime("%d-%m-%Y_%H-%M-%S"), type=str,
help="Output project name (timestamp)")
self.parser.add_argument('--sep', dest='sep', default='\t', type=str,
help="Delimiter for data file (\\t).")
# PARAMETER Options
self.parser.add_argument('--mind', dest='mind', default=0.1, type=float,
help="Filter samples > missing rate (0.1)")
self.parser.add_argument('--geno', dest='geno', default=0.1, type=float,
help="Filter SNPs > missing rate (0.1)")
self.parser.add_argument('--maf', dest='maf', default=0.01, type=float,
help="Filter SNPs < minor allele frequency (0.01)")
self.parser.add_argument('--hwe', dest='hwe', default=0.001, type=float,
help="Filter SNPs failing HWE test at P < (0.001)")
self.parser.add_argument('--start', dest='start', default=10, type=int,
help="Start at k = (10)")
self.parser.add_argument('--stop', dest='stop', default=40, type=int,
help="Stop at k = (40)")
self.parser.add_argument('--step', dest='step', default=10, type=int,
help="Step by k = (10)")
# PIPELINE Options
self.parser.add_argument('--visual', dest='visual', action='store_true', default=False,
help="Node attributes ONLY (OFF)")
self.parser.add_argument('--off', dest='off', action='store_true', default=False,
help="Switch off NetView and run only QC (OFF).")
self.parser.add_argument('--matrix', dest='mat', action='store_true', default=False,
help="Generate distance matrix ONLY (OFF).")
#### Data Module ####
class Data:
### DATA ATTRIBUTES ###
def __init__(self):
self.prefix = "project"
self.ploidy = 'diploid'
self.missing = "0"
self.n = 0
self.nSNP = 0
self.ids = [] # IDs
self.alleles = []
self.snps = np.arange(5) # Array (N x SNPs)
self.biodata = [] # List/Alignment of BioPython SeqRecords
self.meta_data = {}
self.snp_data = {}
self.matrices = {}
self.networks = {}
self.matrix = np.arange(5) # Current Matrix
self.netview_runs = 0
self.filetype = ''
### DATA READER ###
def readData(self, file, f, sep="\t", header=False, add_col=0):
def _read_nexus(file, sep=sep):
snp_position = []
snps = []
matrix = False
for line in file:
content = line.strip().split(sep)
                if matrix:
if ";" in line:
break
snp_position.append(content[0])
snps.append(content[1:])
else:
if "dimensions" in line:
self.n = int(content[1].split("=")[1])
self.nSNP = int(content[2].split("=")[1][:-1])
elif "taxlabels" in line:
self.ids = content[1:]
elif "matrix" in line:
matrix = True
self.snps = np.array([list(i) for i in zip(*snps)]) # ordered by N
self.snp_data['snp_id'] = [''.join(p.split("_")[:-1]) for p in snp_position]
self.snp_data['snp_position'] = [p.split("_")[-1] for p in snp_position]
self.filetype = 'nexus'
def _read_raxml(file, sep=sep):
header = []
ids = []
snps = []
for line in file:
content = line.strip().split(sep)
if header:
ids.append(content[0])
snps.append(content[1])
else:
header = content
            self.n = int(header[0])
            self.nSNP = int(header[1])
self.ids = ids
self.snps = np.array(snps)
self.filetype = 'raxml'
def _read_plink(file, filename, sep=sep):
map_name = filename.split(".")[0] + ".map"
map_file = open(map_name)
ids = []
meta = []
snps = []
for line in file:
content = line.strip().split(sep)
ids.append(content[1])
snps.append(content[6:])
meta.append(content[:6])
self.ids = ids
self.snps = np.array(snps)
self.nSNP = len(self.snps[0])
self.n = len(self.ids)
self.meta_data["pop"] = [i[0] for i in meta]
self.meta_data["dam"] = [i[2] for i in meta]
self.meta_data["sire"] = [i[3] for i in meta]
self.meta_data["sex"] = [i[4] for i in meta]
self.meta_data["phenotype"] = [i[5] for i in meta]
map_content = [line.strip().split() for line in map_file]
map_content = list(zip(*map_content))
self.snp_data['snp_chromosome'] = list(map_content[0])
self.snp_data['snp_id'] = list(map_content[1])
self.snp_data['snp_genetic_distance'] = list(map_content[2])
self.snp_data['snp_position'] = list(map_content[3])
map_file.close()
self.filetype = 'plink'
def _read_matrix(file, header=header, add_col=add_col, sep=sep):
content = [line.strip().split(sep)[add_col:] for line in file]
if header:
content = content[1:]
matrix = np.array([list(map(float, ind)) for ind in content])
self.matrix = matrix
self.matrices['input'] = matrix
return matrix
def _read_snp_mat(file, sep):
matrix = np.array([line.strip().split(sep) for line in file])
self.snps = matrix
self.n = len(matrix[:, 1])
self.nSNP = len(matrix[1, :])
if self.ploidy == 'diploid':
self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP//2)]
else:
self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP)]
def _read_attributes(file, sep=sep):
content = [line.strip().split(sep) for line in file]
head = content[0]
content = list(zip(*content[1:]))
for i in range(len(head)):
self.meta_data[head[i]] = content[i]
self.ids = list(content[0])
## Main Read ##
infile = open(file)
f = f.lower()
if f == "nexus":
_read_nexus(infile, sep)
        elif f == "raxml":
_read_raxml(infile, sep)
elif f == "plink":
_read_plink(infile, file, sep)
elif f == "matrix":
matrix = _read_matrix(infile, header, add_col, sep)
elif f == 'snp_mat':
_read_snp_mat(infile, sep)
elif f == 'attributes':
_read_attributes(infile, sep)
else:
            raise IOError("File format not supported.")
infile.close()
if f != 'attributes':
alleles = np.unique(self.snps).tolist()
if self.missing in alleles:
alleles.remove(self.missing)
self.alleles = alleles
if f == 'matrix':
return matrix
### DATA WRITER ###
def writeData(self, f, file='data.out', sep="\t"):
def _write_raxml(outfile, sep):
outfile.write(str(self.n) + sep + str(self.nSNP) + "\n")
for i in range(self.n):
outfile.write(self.ids[i] + sep + ''.join(self.snps[i]) + "\n")
def _write_nexus(outfile, sep):
taxlabels = " ".join(self.ids)
header = '#nexus\nbegin data;\ndimensions ntax=' + str(self.n) + ' nchar=' + str(self.nSNP) + \
';\nformat symbols="AGCT" gap=. datatype=nucleotide;\ntaxlabels ' + taxlabels + ';\nmatrix\n'
tail = ";\nend;"
snps = list(zip(*self.snps))
outfile.write(header)
for i in range(self.nSNP):
if 'snp_chromosome' in self.snp_data.keys():
outfile.write(self.snp_data['snp_chromosome'][i] + "_")
else:
outfile.write(sep)
if 'snp_id' in self.snp_data.keys():
outfile.write(self.snp_data['snp_id'][i] + sep)
else:
outfile.write("SNP" + str(i) + sep)
outfile.write(sep.join(snps[i]) + "\n")
outfile.write(tail)
def _write_plink(outfile, filename, sep):
mapname = filename.split('.')[0] + ".map"
for i in range(self.n):
if 'pop' in self.meta_data.keys():
outfile.write(self.meta_data['pop'][i] + sep)
else:
outfile.write("NA" + sep)
if self.ids:
outfile.write(self.ids[i] + sep)
else:
outfile.write("N" + str(i+1) + sep)
if 'dam' in self.meta_data.keys():
outfile.write(self.meta_data['dam'][i] + sep)
else:
outfile.write("0" + sep)
if 'sire' in self.meta_data.keys():
outfile.write(self.meta_data['sire'][i] + sep)
else:
outfile.write("0" + sep)
if 'sex' in self.meta_data.keys():
outfile.write(self.meta_data['sex'][i] + sep)
else:
outfile.write("0" + sep)
if 'phenotype' in self.meta_data.keys():
outfile.write(self.meta_data['phenotype'][i] + sep)
else:
outfile.write("0" + sep)
outfile.write(sep.join(self.snps[i]) + "\n")
map_file = open(mapname, "w")
if 'snp_id' in self.snp_data:
for i in range(len(self.snp_data['snp_id'])):
if 'snp_chromosome' in self.snp_data.keys():
map_file.write(self.snp_data['snp_chromosome'][i] + sep)
else:
map_file.write("0" + sep)
if 'snp_id' in self.snp_data.keys():
map_file.write(self.snp_data['snp_id'][i] + sep)
else:
map_file.write("SNP" + str(i+1) + sep)
if 'snp_genetic_distance' in self.snp_data.keys():
map_file.write(self.snp_data['snp_genetic_distance'][i] + sep)
else:
map_file.write("0" + sep)
if 'snp_position' in self.snp_data.keys():
map_file.write(self.snp_data['snp_position'][i] + sep + "\n")
else:
map_file.write("0" + sep + "\n")
map_file.close()
def _write_metadata(outfile, sep):
outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
ordered_keys = sorted([key for key in self.meta_data.keys()])
outfile.write("Isolate")
for key in ordered_keys:
outfile.write(sep + key)
outfile.write("\n")
for i in range(self.n):
if self.ids:
outfile.write(self.ids[i])
else:
                    outfile.write("N" + str(i + 1))
for key in ordered_keys:
outfile.write(sep + self.meta_data[key][i])
outfile.write("\n")
def _write_snpdata(outfile, sep):
outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
snp_data = dict(self.snp_data)
ordered_keys = sorted([key for key in snp_data.keys()])
outfile.write("SNP" + sep)
for key in ordered_keys:
outfile.write(sep + key)
outfile.write("\n")
for i in range(self.nSNP):
outfile.write("SNP_" + str(i))
for key in ordered_keys:
outfile.write(sep + snp_data[key][i])
outfile.write("\n")
def _write_attributes():
for key, value in self.meta_data.items():
outname = self.prefix + '_' + key + '.nat'
out = open(outname, 'w')
out.write('ID\t' + self.prefix + '_' + key + '\n')
for i in range(len(value)):
out.write(self.ids[i] + '\t' + value[i] + '\n')
out.close()
## Main Write ##
if f == 'attributes':
_write_attributes()
else:
filename = file
outfile = open(filename, "w")
f = f.lower()
if f == "nexus":
_write_nexus(outfile, sep)
            elif f == "raxml":
_write_raxml(outfile, sep)
elif f == "plink":
_write_plink(outfile, file, sep)
elif f == "matrix":
np.savetxt(filename, self.matrix, fmt='%.9f', delimiter=sep)
elif f == "meta":
_write_metadata(outfile, sep)
elif f == "snp":
_write_snpdata(outfile, sep)
else:
raise IOError("File format not supported.")
outfile.close()
def __str__(self):
return ('-----------\nNumber of Individuals: %i\nNumber of SNPs: %i\nPloidy: %s\n-----------\n') % \
(self.n, self.nSNP, self.ploidy)
#### Analysis Module ####
class Analysis:
def __init__(self, data):
self.data = data
def getDistance(self, target='snps', distance='hamming'):
print(get_time() + "\t" + 'Distance = ' + distance.upper())
if self.data.filetype == 'dist':
target = 'matrix'
if target == 'matrix':
matrix = np.array(self.data.matrix)
else:
# Convert alleles to numbers (e.g. A -> 1, B -> 2) for use in scipy.spatial.distance.pdist()
allele_codes = {}
for i in range(len(self.data.alleles)):
allele_codes[self.data.alleles[i]] = int(i+1)
            allele_codes[self.data.missing] = 0  # missing must not collide with the 1..i allele codes
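            # e.g. alleles ['A', 'G'] become {'A': 1, 'G': 2} with the missing
            # character mapped to 0, so pdist() below sees purely numeric data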
snps = self.data.snps
for a, code in allele_codes.items():
snps[snps == a] = code
matrix = snps
        if distance == 'asd':
            self.runPLINK(asd=True)
            # Capture the matrix returned by readData; previously the snps
            # array below clobbered self.data.matrix right after reading.
            matrix = self.data.readData(file=self.data.prefix + '_plink.mdist',
                                        f='matrix', sep=' ')
        else:
            matrix = sd.squareform(sd.pdist(matrix, distance))
        self.data.matrix = matrix
self.data.matrices[distance] = self.data.matrix
return matrix
def runPLINK(self, qc_parameters={}, commandstring='', asd=False, quality=False):
if self.data.ploidy == 'haploid':
raise AttributeError('Haploid genotypes not supported for PLINK.')
if commandstring:
subprocess.call(commandstring)
else:
self.data.writeData(file=self.data.prefix + '_plink_in.ped', f='plink')
if quality and qc_parameters:
command = ['plink', '--noweb', '--file', self.data.prefix + '_plink_in']
for key, value in qc_parameters.items():
command.append(key)
command.append(str(value))
command.append('--recode')
command.append('--out')
command.append(self.data.prefix + '_plink_qc')
subprocess.call(command, stdout=subprocess.DEVNULL)
if os.path.exists(self.data.prefix + '_plink_qc.ped'):
self.data.readData(file=self.data.prefix + '_plink_qc.ped', f='plink', sep=' ')
if asd:
subprocess.call(['plink', '--noweb', '--file', self.data.prefix + '_plink_in', '--cluster', '--distance-matrix',
'--out', self.data.prefix + '_plink'], stdout=subprocess.DEVNULL)
def updateNodeAttributes(self, attribute_file):
if os.path.isfile(self.data.prefix + '_plink_qc.irem'):
infile = open(self.data.prefix + '_plink_qc.irem')
to_remove = [line.strip().split()[1] for line in infile]
infile.close()
infile = open(attribute_file)
outname = attribute_file.split('.')[0] + '_qc.csv'
outfile = open(outname, 'w')
for line in infile:
content = line.strip().split(',')
if content[0] not in to_remove:
outfile.write(line)
infile.close()
outfile.close()
self.data.readData(file=outname, f='attributes', sep=',')
def runNetView(self, tree=True, start=10, stop=40, step=10, algorithm='auto'):
print(get_time() + "\t" + "Minimum Spanning Tree = " + str(tree).upper())
print(get_time() + "\t" + "Nearest Neighbour = " + algorithm.upper())
print(get_time() + "\t" + "k = " + str(start) + " - " + str(stop) + ' (by ' + str(step) + ')')
print(get_time() + "\t" + "---------------------------------")
self.data.netview_runs += 1
matrix = self.data.matrix
if tree:
mst = csg.minimum_spanning_tree(matrix)
mst = mst.toarray()
self.data.networks['mst_' + str(self.data.netview_runs)] = mst
mst = mst + mst.T
else:
mst = None
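        # Fan out one netview() job per k to a process pool; apply_async
        # collects the results while the callback merely logs each finished k.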
pool = mp.Pool()
networks = [pool.apply_async(netview, args=(matrix, k, mst, algorithm, tree,), callback=netview_callback)
for k in range(start, stop+1, step)]
pool.close()
pool.join()
for item in networks:
result = item.get()
edges_array = result[1]
edges = result[1].tolist()
mst_edges = result[4].tolist()
self.data.networks['netview_k' + str(result[0]) + '_' + str(self.data.netview_runs)] = result[1:]
filename = self.data.prefix + '_netview_k' + str(result[0]) +\
"_" + str(self.data.netview_runs) + '.edges'
out = open(filename, "w")
out.write('Source\tTarget\tDistance\tMST\n')
for i in range(len(edges)):
out.write(str(self.data.ids[edges[i][0]]) + "\t" + str(self.data.ids[edges[i][1]]) +
"\t" + str(matrix[edges[i][0], edges[i][1]]))
if tree:
if edges[i] in mst_edges:
out.write('\t' + 'red\n')
else:
out.write('\t' + 'grey\n')
else:
out.write("\n")
if not tree:
singletons = np.setdiff1d(np.arange(self.data.n), edges_array.flatten()).tolist()
if singletons:
for node in singletons:
                        out.write(str(self.data.ids[node]) + '\n')  # use sample IDs, matching the edge rows above
out.close()
main()
|
gpl-2.0
|
tjsavage/rototutor_djangononrel
|
django/views/decorators/http.py
|
94
|
6854
|
"""
Decorators for views based on HTTP headers.
"""
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from calendar import timegm
from datetime import timedelta
from email.Utils import formatdate
from django.utils.decorators import decorator_from_middleware, available_attrs
from django.utils.http import parse_etags, quote_etag
from django.utils.log import getLogger
from django.middleware.http import ConditionalGetMiddleware
from django.http import HttpResponseNotAllowed, HttpResponseNotModified, HttpResponse
conditional_page = decorator_from_middleware(ConditionalGetMiddleware)
logger = getLogger('django.request')
def require_http_methods(request_method_list):
"""
Decorator to make a view only accept particular request methods. Usage::
@require_http_methods(["GET", "POST"])
def my_view(request):
# I can assume now that only GET or POST requests make it this far
# ...
Note that request methods should be in uppercase.
"""
def decorator(func):
def inner(request, *args, **kwargs):
if request.method not in request_method_list:
logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path),
extra={
'status_code': 405,
'request': request
}
)
return HttpResponseNotAllowed(request_method_list)
return func(request, *args, **kwargs)
return wraps(func, assigned=available_attrs(func))(inner)
return decorator
require_GET = require_http_methods(["GET"])
require_GET.__doc__ = "Decorator to require that a view only accept the GET method."
require_POST = require_http_methods(["POST"])
require_POST.__doc__ = "Decorator to require that a view only accept the POST method."
def condition(etag_func=None, last_modified_func=None):
"""
Decorator to support conditional retrieval (or change) for a view
function.
The parameters are callables to compute the ETag and last modified time for
the requested resource, respectively. The callables are passed the same
parameters as the view itself. The Etag function should return a string (or
None if the resource doesn't exist), whilst the last_modified function
should return a datetime object (or None if the resource doesn't exist).
If both parameters are provided, all the preconditions must be met before
the view is processed.
This decorator will either pass control to the wrapped view function or
return an HTTP 304 response (unmodified) or 412 response (preconditions
failed), depending upon the request method.
Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
plus If-modified-since headers) will result in the view function being
called.
"""
def decorator(func):
def inner(request, *args, **kwargs):
# Get HTTP request headers
if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
if_match = request.META.get("HTTP_IF_MATCH")
if if_none_match or if_match:
# There can be more than one ETag in the request, so we
# consider the list of values.
try:
etags = parse_etags(if_none_match or if_match)
except ValueError:
# In case of invalid etag ignore all ETag headers.
# Apparently Opera sends invalidly quoted headers at times
# (we should be returning a 400 response, but that's a
# little extreme) -- this is Django bug #10681.
if_none_match = None
if_match = None
# Compute values (if any) for the requested resource.
if etag_func:
res_etag = etag_func(request, *args, **kwargs)
else:
res_etag = None
if last_modified_func:
dt = last_modified_func(request, *args, **kwargs)
if dt:
res_last_modified = formatdate(timegm(dt.utctimetuple()))[:26] + 'GMT'
else:
res_last_modified = None
else:
res_last_modified = None
response = None
if not ((if_match and (if_modified_since or if_none_match)) or
(if_match and if_none_match)):
# We only get here if no undefined combinations of headers are
# specified.
if ((if_none_match and (res_etag in etags or
"*" in etags and res_etag)) and
(not if_modified_since or
res_last_modified == if_modified_since)):
if request.method in ("GET", "HEAD"):
response = HttpResponseNotModified()
else:
logger.warning('Precondition Failed: %s' % request.path,
extra={
'status_code': 412,
'request': request
}
)
response = HttpResponse(status=412)
elif if_match and ((not res_etag and "*" in etags) or
(res_etag and res_etag not in etags)):
logger.warning('Precondition Failed: %s' % request.path,
extra={
'status_code': 412,
'request': request
}
)
response = HttpResponse(status=412)
elif (not if_none_match and if_modified_since and
request.method == "GET" and
res_last_modified == if_modified_since):
response = HttpResponseNotModified()
if response is None:
response = func(request, *args, **kwargs)
# Set relevant headers on the response if they don't already exist.
if res_last_modified and not response.has_header('Last-Modified'):
response['Last-Modified'] = res_last_modified
if res_etag and not response.has_header('ETag'):
response['ETag'] = quote_etag(res_etag)
return response
return inner
return decorator
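# Illustrative usage of condition() (assumed model and field names, not part
# of this module):
#
#     @condition(etag_func=lambda request, pk: str(pk),
#                last_modified_func=lambda request, pk:
#                    Article.objects.get(pk=pk).last_edited)
#     def article_detail(request, pk):
#         ...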
# Shortcut decorators for common cases based on ETag or Last-Modified only
def etag(etag_func):
return condition(etag_func=etag_func)
def last_modified(last_modified_func):
return condition(last_modified_func=last_modified_func)
|
bsd-3-clause
|
styxit/HTPC-Manager
|
libs/cherrypy/lib/profiler.py
|
88
|
6501
|
"""Profiler tools for CherryPy.
CherryPy users
==============
You can profile any of your pages as follows::
from cherrypy.lib import profiler
class Root:
        p = profiler.Profiler("/path/to/profile/dir")
def index(self):
self.p.run(self._index)
index.exposed = True
def _index(self):
return "Hello, world!"
cherrypy.tree.mount(Root())
You can also turn on profiling for all requests
using the ``make_app`` function as WSGI middleware.
CherryPy developers
===================
This module can be used whenever you make changes to CherryPy,
to get a quick sanity-check on overall CP performance. Use the
``--profile`` flag when running the test suite. Then, use the ``serve()``
function to browse the results in a web browser. If you run this
module from the command line, it will call ``serve()`` for you.
"""
def new_func_strip_path(func_name):
"""Make profiler output more readable by adding ``__init__`` modules' parents"""
filename, line, name = func_name
if filename.endswith("__init__.py"):
return os.path.basename(filename[:-12]) + filename[-12:], line, name
return os.path.basename(filename), line, name
try:
import profile
import pstats
pstats.func_strip_path = new_func_strip_path
except ImportError:
profile = None
pstats = None
import os, os.path
import sys
import warnings
from cherrypy._cpcompat import BytesIO
_count = 0
class Profiler(object):
def __init__(self, path=None):
if not path:
path = os.path.join(os.path.dirname(__file__), "profile")
self.path = path
if not os.path.exists(path):
os.makedirs(path)
def run(self, func, *args, **params):
"""Dump profile data into self.path."""
global _count
c = _count = _count + 1
path = os.path.join(self.path, "cp_%04d.prof" % c)
prof = profile.Profile()
result = prof.runcall(func, *args, **params)
prof.dump_stats(path)
return result
def statfiles(self):
""":rtype: list of available profiles.
"""
return [f for f in os.listdir(self.path)
if f.startswith("cp_") and f.endswith(".prof")]
def stats(self, filename, sortby='cumulative'):
""":rtype stats(index): output of print_stats() for the given profile.
"""
sio = BytesIO()
if sys.version_info >= (2, 5):
s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
s.strip_dirs()
s.sort_stats(sortby)
s.print_stats()
else:
# pstats.Stats before Python 2.5 didn't take a 'stream' arg,
# but just printed to stdout. So re-route stdout.
s = pstats.Stats(os.path.join(self.path, filename))
s.strip_dirs()
s.sort_stats(sortby)
oldout = sys.stdout
try:
sys.stdout = sio
s.print_stats()
finally:
sys.stdout = oldout
response = sio.getvalue()
sio.close()
return response
def index(self):
return """<html>
<head><title>CherryPy profile data</title></head>
<frameset cols='200, 1*'>
<frame src='menu' />
<frame name='main' src='' />
</frameset>
</html>
"""
index.exposed = True
def menu(self):
yield "<h2>Profiling runs</h2>"
yield "<p>Click on one of the runs below to see profiling data.</p>"
runs = self.statfiles()
runs.sort()
for i in runs:
yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (i, i)
menu.exposed = True
def report(self, filename):
import cherrypy
cherrypy.response.headers['Content-Type'] = 'text/plain'
return self.stats(filename)
report.exposed = True
class ProfileAggregator(Profiler):
def __init__(self, path=None):
Profiler.__init__(self, path)
global _count
self.count = _count = _count + 1
self.profiler = profile.Profile()
def run(self, func, *args):
path = os.path.join(self.path, "cp_%04d.prof" % self.count)
result = self.profiler.runcall(func, *args)
self.profiler.dump_stats(path)
return result
class make_app:
def __init__(self, nextapp, path=None, aggregate=False):
"""Make a WSGI middleware app which wraps 'nextapp' with profiling.
nextapp
the WSGI application to wrap, usually an instance of
cherrypy.Application.
path
where to dump the profiling output.
aggregate
if True, profile data for all HTTP requests will go in
a single file. If False (the default), each HTTP request will
dump its profile data into a separate file.
"""
if profile is None or pstats is None:
msg = ("Your installation of Python does not have a profile module. "
"If you're on Debian, try `sudo apt-get install python-profiler`. "
"See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
warnings.warn(msg)
self.nextapp = nextapp
self.aggregate = aggregate
if aggregate:
self.profiler = ProfileAggregator(path)
else:
self.profiler = Profiler(path)
def __call__(self, environ, start_response):
def gather():
result = []
for line in self.nextapp(environ, start_response):
result.append(line)
return result
return self.profiler.run(gather)
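# Illustrative wiring (assumed names): wrap an existing WSGI application so
# each request dumps its profile under /tmp/cp_profiles:
#
#   app = make_app(cherrypy.Application(Root()), path='/tmp/cp_profiles')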
def serve(path=None, port=8080):
if profile is None or pstats is None:
msg = ("Your installation of Python does not have a profile module. "
"If you're on Debian, try `sudo apt-get install python-profiler`. "
"See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
warnings.warn(msg)
import cherrypy
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': "production",
})
cherrypy.quickstart(Profiler(path))
if __name__ == "__main__":
serve(*tuple(sys.argv[1:]))
|
mit
|
citizen-stig/pyjtt
|
pyjtt/gui.py
|
1
|
26426
|
from datetime import timedelta, datetime
import queue
from urllib import error
from functools import partial
import logging
from PyQt5 import QtWidgets, QtCore, QtGui
from pyjtt import base_classes, core, utils, workers
from pyjtt.widgets import login_window, main_window, worklog_window
__author__ = "Nikolay Golub ([email protected])"
__copyright__ = "Copyright 2012 - 2018, Nikolay Golub"
__license__ = "GPL"
logger = logging.getLogger(__name__)
MINIMUM_WORKLOG_SIZE_MINUTES = 5
class PyJTTException(Exception):
pass
class NotSelectedException(PyJTTException):
pass
class LoginWindow(QtWidgets.QDialog):
"""Checks user credentials and handles login"""
def __init__(self,
jirahost,
login,
password,
save_credentials=False,
parent=None):
super(LoginWindow, self).__init__(parent)
self.ui = login_window.Ui_loginWindow()
self.ui.setupUi(self)
if jirahost:
self.ui.lineEditHostAddress.setText(jirahost)
if login:
self.ui.lineEditLogin.setText(login)
if password:
self.ui.lineEditPassword.setText(password)
if save_credentials:
self.ui.checkBoxSaveCredentials.setCheckState(save_credentials)
self.ui.buttonBox.accepted.connect(self.handle_login_input)
self.ui.buttonBox.rejected.connect(self.user_exit)
def handle_login_input(self):
"""Initial check of data provided by the user"""
jira_host = str(self.ui.lineEditHostAddress.text())
login = str(self.ui.lineEditLogin.text())
password = str(self.ui.lineEditPassword.text())
if not jira_host:
QtWidgets.QMessageBox.warning(
self, 'Login error', 'Enter JIRA host address')
elif not login:
QtWidgets.QMessageBox.warning(
self, 'Login error', 'Enter login')
elif not password:
QtWidgets.QMessageBox.warning(
self, 'Login error', 'Enter password')
else:
logger.debug('Starting Login')
if self._login(jira_host, login, password):
self.accept()
def user_exit(self):
"""Standard exit"""
self.reject()
def _login(self, jira_host, login, password):
"""Actual login"""
logger.debug('Trying to get user info')
try:
app = core.TimeTrackerApp(jira_host, login, password)
app.get_user_info()
return True
except (error.HTTPError, error.URLError) as general_error:
# TODO: add dict with advices, based on return code
QtWidgets.QMessageBox.warning(self, 'Login Error',
                                          'Error {0}. Check URL or try to '
                                          'login via Web'.format(general_error))
# QtWidgets.QMessageBox.warning(self, 'Login Error',
# 'Error %s %s. Check URL or try to login via Web'
# % (str(general_error.code), general_error.reason))
class SystemTrayIcon(QtWidgets.QSystemTrayIcon):
def __init__(self, icon, parent=None):
super(SystemTrayIcon, self).__init__(icon, parent)
menu = QtWidgets.QMenu(parent)
exitAction = menu.addAction("Exit")
exitAction.triggered.connect(parent.close)
self.setContextMenu(menu)
class WorklogWindow(QtWidgets.QDialog):
"""Widget for working with worklog data.
It allows to set date, time ranges and comment to JIRA worklog
"""
def __init__(self, title, worklog_entry, parent=None):
super(WorklogWindow, self).__init__(parent)
self.ui = worklog_window.Ui_WorklogWindow()
self.ui.setupUi(self)
self.setWindowTitle(title)
self.worklog_entry = worklog_entry
self.fill_fields(self.worklog_entry)
self.ui.buttonBox.accepted.connect(self.save_worklog_data)
self.ui.buttonBox.rejected.connect(self.user_exit)
self.ui.timeStartEdit.timeChanged.connect(self.start_time_changed)
self.ui.timeEndEdit.timeChanged.connect(self.end_time_changed)
@staticmethod
def datetime_to_qtime(timestamp):
"""Converts Python datetime timestamp to QTime"""
return QtCore.QTime(timestamp.hour, timestamp.minute)
def fill_fields(self, worklog_entry):
"""Set up widget fields with worklog_entry data"""
self.ui.labelIssue.setText(worklog_entry.issue.key + ': ' + worklog_entry.issue.summary)
self.ui.dateEdit.setDate(worklog_entry.started)
self.ui.timeStartEdit.setTime(self.datetime_to_qtime(worklog_entry.started))
self.ui.timeEndEdit.setTime(self.datetime_to_qtime(worklog_entry.ended))
self.ui.timeEndEdit.setMinimumTime(self.datetime_to_qtime(worklog_entry.started
+ timedelta(minutes=MINIMUM_WORKLOG_SIZE_MINUTES)))
self.ui.plainTextCommentEdit.setPlainText(worklog_entry.comment)
self.refresh_spent()
def refresh_spent(self):
spent = 'Time spent: ' + \
utils.get_time_spent_string(
self.ui.timeEndEdit.dateTime().toPyDateTime() -
self.ui.timeStartEdit.dateTime().toPyDateTime())
self.ui.labelSpent.setText(spent)
def start_time_changed(self):
"""Control bounds of worklog time ranges"""
start_time = self.ui.timeStartEdit.time()
self.ui.timeEndEdit.setMinimumTime(start_time.addSecs(MINIMUM_WORKLOG_SIZE_MINUTES * 60))
self.refresh_spent()
def end_time_changed(self):
"""Control bounds of worklog time ranges"""
end_time = self.ui.timeEndEdit.time()
self.ui.timeStartEdit.setMaximumTime(end_time.addSecs(MINIMUM_WORKLOG_SIZE_MINUTES * -60))
self.refresh_spent()
def save_worklog_data(self):
"""Create worklog object from user input"""
date = self.ui.dateEdit.date().toPyDate()
start = self.ui.timeStartEdit.time().toPyTime()
end = self.ui.timeEndEdit.time().toPyTime()
self.worklog_entry.started = datetime.combine(date, start)
self.worklog_entry.ended = datetime.combine(date, end)
self.worklog_entry.comment = str(self.ui.plainTextCommentEdit.toPlainText())
self.accept()
def user_exit(self):
"""Standard exit"""
self.reject()
class MainWindow(QtWidgets.QMainWindow):
"""Core widget of the GUI"""
ui = main_window.Ui_MainWindow()
number_of_workers = 10
tasks_queue = queue.Queue()
def __init__(self, jirahost, login, password, parent=None):
super(MainWindow, self).__init__()
self.init_ui()
# Initialize app
self.app = core.TimeTrackerApp(jirahost, login, password)
# Initialize workers
self.worker_threads = []
self.tracking_thread = None
self.init_workers()
self.init_signals()
# Request assigned issues
get_assigned_issues_job = partial(self.app.get_user_assigned_issues)
self.tasks_queue.put(get_assigned_issues_job)
    # Naming convention:
    # Underscore prefix marks auxiliary methods which aren't called by signals
# Core stuff
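    # Producer/consumer sketch: GUI slots enqueue functools.partial jobs on
    # tasks_queue; each NoResultThread pops and runs them, reporting back to
    # the UI via Qt signals (task_started, task_done, exception_raised).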
def init_workers(self):
for i in range(self.number_of_workers):
worker = workers.NoResultThread(self.tasks_queue)
self.worker_threads.append(worker)
worker.start()
worker.task_started.connect(self.set_status)
worker.task_done.connect(self.refresh_ui)
worker.task_done.connect(self.clear_status)
worker.exception_raised.connect(self.show_error)
def init_signals(self):
# Signals
self.ui.FindIssue.clicked.connect(self.get_issue)
self.ui.tableIssues.clicked.connect(self.set_issue_selected)
self.ui.dateDayWorklogEdit.dateChanged.connect(self.print_worklog_table)
self.ui.actionReresh_issue.triggered.connect(self.refresh_issue)
self.ui.actionFull_refresh.triggered.connect(self.full_refresh)
self.ui.startStopTracking.clicked.connect(self.online_tracking)
self.ui.lineIssueKey.textChanged.connect(self.filter_issues_table)
self.ui.tableIssues.doubleClicked.connect(self.add_worklog_entry)
self.ui.tableDayWorklog.doubleClicked.connect(self.change_worklog_entry)
self.ui.editWorklog.clicked.connect(self.change_worklog_entry)
self.ui.removeWorklog.clicked.connect(self.remove_worklog_entry)
self.ui.actionRemove_issue_from_cache.triggered.connect(self.remove_issue_from_local)
self.ui.actionLogout.triggered.connect(self.logout)
self.ui.tray_icon.activated.connect(self.tray_click)
def closeEvent(self, event):
buttons = QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
message = 'Are you sure you want to close this application?'
confirmation = QtWidgets.QMessageBox.question(self,
'Exit',
message,
buttons=buttons)
if confirmation == QtWidgets.QMessageBox.Yes:
for thread in self.worker_threads:
thread.quit()
super(MainWindow, self).closeEvent(event)
else:
event.ignore()
def logout(self):
relogin_window = LoginWindow(self.app.jira_accessor.jirahost,
self.app.jira_accessor.login,
self.app.jira_accessor.password,
parent=self)
relogin_result = relogin_window.exec_()
if relogin_result == QtWidgets.QDialog.Accepted:
jira_host = relogin_window.ui.lineEditHostAddress.text()
login = relogin_window.ui.lineEditLogin.text()
password = relogin_window.ui.lineEditPassword.text()
config = utils.init_config()
config.set('main', 'jirahost', jira_host)
config.set('main', 'login', login)
if relogin_window.ui.checkBoxSaveCredentials.isChecked():
config.set('main', 'password', password)
else:
config.remove_option('main', 'password')
utils.write_config(config)
self.app = core.TimeTrackerApp(jira_host, login, password)
else:
self.close()
def _get_selected_issue_from_table(self):
if self.ui.tableIssues.selectedItems():
selected_indexes = self.ui.tableIssues.selectedIndexes()
container_coordinates = selected_indexes[0]
issue_container = self.ui.tableIssues.item(container_coordinates.row(),
container_coordinates.column())
issue = issue_container.issue
return issue
else:
QtWidgets.QMessageBox.warning(self,
'Error',
'Please, select issue first')
raise NotSelectedException('Issue is not selected by user')
def _get_selected_worklog_from_table(self):
        if self.ui.tableDayWorklog.selectedItems():
            selected_indexes = self.ui.tableDayWorklog.selectedIndexes()
container_coordinates = selected_indexes[0]
worklog_container = self.ui.tableDayWorklog.item(container_coordinates.row(),
container_coordinates.column())
worklog_entry = worklog_container.worklog_entry
return worklog_entry
else:
QtWidgets.QMessageBox.warning(self,
'Error',
'Please, select worklog first')
raise NotSelectedException('Worklog entry is not selected by user')
def get_issue(self):
issue_keys = str(self.ui.lineIssueKey.text())
for issue_key in issue_keys.split(','):
issue_key = issue_key.strip().upper()
if utils.check_jira_issue_key(issue_key):
get_issue_task = partial(self.app.get_issue, issue_key)
self.tasks_queue.put(get_issue_task)
def add_worklog_entry(self):
issue = self._get_selected_issue_from_table()
ended = datetime.now()
started = ended - timedelta(hours=1)
comment = ''
worklog_entry = base_classes.JiraWorklogEntry(issue,
started,
ended,
comment)
add_window = WorklogWindow('Add worklog',
worklog_entry,
parent=self)
edit_result = add_window.exec_()
if edit_result == QtWidgets.QDialog.Accepted:
job = partial(self.app.add_worklog_entry, add_window.worklog_entry)
self.tasks_queue.put(job)
def change_worklog_entry(self):
worklog_entry = self._get_selected_worklog_from_table()
        edit_window = WorklogWindow('Edit worklog',
worklog_entry,
parent=self)
edit_result = edit_window.exec_()
if edit_result == QtWidgets.QDialog.Accepted:
job = partial(self.app.update_worklog_entry,
edit_window.worklog_entry)
self.tasks_queue.put(job)
def remove_worklog_entry(self):
worklog_entry = self._get_selected_worklog_from_table()
confirmation = QtWidgets.QMessageBox.question(
self,
'Remove Worklog',
            'Do you really want to remove this worklog?',
buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if confirmation == QtWidgets.QMessageBox.Yes:
# Push the job to the queue
job = partial(self.app.remove_worklog_entry, worklog_entry)
self.tasks_queue.put(job)
def remove_issue_from_local(self):
issue = self._get_selected_issue_from_table()
confirmation = QtWidgets.QMessageBox.question(
self,
'Remove Issue',
            'Do you really want to remove this issue from the local cache?',
buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
if confirmation == QtWidgets.QMessageBox.Yes:
job = partial(self.app.remove_issue, issue)
self.tasks_queue.put(job)
def start_online_tracking(self):
try:
issue = self._get_selected_issue_from_table()
self.ui.labelSelectedIssue.issue = issue
self.tracking_thread = workers.TrackingWorker()
self.tracking_thread.timer_updated.connect(self.update_timer)
self.tracking_thread.start()
# Change UI
stop_icon = QtGui.QIcon()
stop_icon.addPixmap(QtGui.QPixmap(":/res/icons/stop.ico"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off)
self.ui.startStopTracking.setText('Stop Tracking')
self.ui.startStopTracking.setIcon(stop_icon)
except NotSelectedException:
self.ui.startStopTracking.setChecked(False)
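    # workers.TrackingWorker is assumed to be a QThread subclass that records
    # its start time and emits a timer_updated(int) signal with the elapsed
    # seconds (both are used by stop_online_tracking() and update_timer()
    # below). A minimal sketch, assuming PyQt5 and module-level imports of
    # time and datetime:
    #
    #     class TrackingWorker(QtCore.QThread):
    #         timer_updated = QtCore.pyqtSignal(int)
    #
    #         def run(self):
    #             self.started = datetime.now()
    #             seconds = 0
    #             while True:
    #                 time.sleep(1)
    #                 seconds += 1
    #                 self.timer_updated.emit(seconds)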
def stop_online_tracking(self):
# Get tracked time
started, ended = self.tracking_thread.started, datetime.now()
# Check that tracked more than minimum worklog minutes
if (ended - started).total_seconds() > (MINIMUM_WORKLOG_SIZE_MINUTES * 60.0):
# Get issue
issue = self.ui.labelSelectedIssue.issue
# Create worklog
worklog_entry = base_classes.JiraWorklogEntry(issue, started, ended, '')
# Ask for options: Add, Cancel, Continue tracking, Open worklog before add
info_msg = 'Do you want to add this worklog:\n' + \
'Issue: {issue_key}\n'.format(issue_key=worklog_entry.issue.key) + \
'Started: {started}\n'.format(started=worklog_entry.started) + \
'Ended: {ended}\n'.format(ended=worklog_entry.ended) + \
'Time spent: {spent}\n'.format(spent=worklog_entry.get_timespent_string()) + \
                   'Or edit it before adding?'
confirmation = QtWidgets.QMessageBox.question(self,
'Add New Worklog',
info_msg,
buttons=QtWidgets.QMessageBox.Yes
| QtWidgets.QMessageBox.No
| QtWidgets.QMessageBox.Cancel
| QtWidgets.QMessageBox.Open)
if confirmation == QtWidgets.QMessageBox.Yes:
# Push the job to the queue
job = partial(self.app.add_worklog_entry, worklog_entry)
self.tasks_queue.put(job)
elif confirmation == QtWidgets.QMessageBox.Open:
edit_window = WorklogWindow('Edit worklog',
worklog_entry,
parent=self)
edit_result = edit_window.exec_()
if edit_result == QtWidgets.QDialog.Accepted:
job = partial(self.app.add_worklog_entry, edit_window.worklog_entry)
self.tasks_queue.put(job)
else:
                    # Edit dialog was cancelled; keep tracking
return
elif confirmation == QtWidgets.QMessageBox.Cancel:
            # User cancelled the stop; keep tracking
return
# Terminate thread
self.tracking_thread.terminate()
self.tracking_thread = None
# Clear UI
self.ui.startStopTracking.setText('Start Tracking')
self.ui.labelTimeSpent.setText('00:00:00')
start_icon = QtGui.QIcon()
start_icon.addPixmap(QtGui.QPixmap(":/res/icons/start.ico"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off)
self.ui.startStopTracking.setIcon(start_icon)
def refresh_issue(self):
if not self.ui.tableIssues.isHidden():
try:
issue = self._get_selected_issue_from_table()
refresh_job = partial(self.app.refresh_issue, issue)
self.tasks_queue.put(refresh_job)
except NotSelectedException:
pass
else:
# TODO: add extraction from worklog table
pass
def full_refresh(self):
for issue in self.app.get_all_issues():
refresh_job = partial(self.app.refresh_issue, issue)
self.tasks_queue.put(refresh_job)
def relogin(self):
pass
# GUI stuff
def init_ui(self):
"""Method for UI customization"""
self.ui.setupUi(self)
self.ui.dateDayWorklogEdit.setDate(QtCore.QDate.currentDate())
# Status bar customization
self.ui.spinning_img = QtGui.QMovie(':/res/img/spinning-progress6.gif')
self.ui.spinning_label = QtWidgets.QLabel()
self.ui.spinning_label.setMovie(self.ui.spinning_img)
self.ui.spinning_label.hide()
self.ui.status_msg = QtWidgets.QLabel('Synchronizing...')
self.ui.statusbar.addWidget(self.ui.spinning_label)
self.ui.statusbar.addWidget(self.ui.status_msg)
self.ui.status_msg.hide()
# Tray icon
self.ui.tray_icon = SystemTrayIcon(
QtGui.QIcon(":/res/icons/clock.ico"),
self)
self.ui.tray_icon.show()
def refresh_ui(self):
if self.ui.lineIssueKey.text():
self.filter_issues_table()
else:
self.print_issues_table(self.app.get_all_issues())
self.print_worklog_table()
def show_error(self, exception):
logger.error(str(exception))
        info_msg = 'An error occurred:\n'
        info_msg += str(exception)
QtWidgets.QMessageBox.warning(self, 'Warning', info_msg)
def set_status(self):
self.ui.spinning_label.show()
self.ui.status_msg.show()
self.ui.spinning_img.start()
def clear_status(self):
if self.tasks_queue.empty():
self.ui.spinning_label.hide()
self.ui.status_msg.hide()
def filter_issues_table(self):
cur_text = self.ui.lineIssueKey.text()
all_issues = self.app.get_all_issues()
issues = (x for x in all_issues if cur_text.lower() in x.key.lower() or
cur_text.lower() in x.summary.lower())
self.print_issues_table(issues)
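    # Example: typing 'auth' in the issue-key field matches issues whose key
    # or summary contains 'auth' case-insensitively, e.g. 'AUTH-42' or
    # 'Fix OAuth login'.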
def print_issues_table(self, issues):
logger.debug('Refreshing issues table')
self.ui.tableIssues.setSortingEnabled(False)
row_count = 0
self.ui.tableIssues.setRowCount(row_count)
for row_num, issue in enumerate(issues):
row_count += 1
self.ui.tableIssues.setRowCount(row_count)
table_item = QtWidgets.QTableWidgetItem(issue.key)
table_item.issue = issue
self.ui.tableIssues.setItem(row_num, 0, table_item)
self.ui.tableIssues.setItem(row_num, 1,
QtWidgets.QTableWidgetItem(issue.summary))
self.ui.tableIssues.resizeColumnToContents(0)
        self.ui.tableIssues.sortByColumn(0, QtCore.Qt.AscendingOrder)
self.ui.tableIssues.setSortingEnabled(True)
logger.debug('Issues table was refreshed')
def print_worklog_table(self):
selected_day = self.ui.dateDayWorklogEdit.date().toPyDate()
day_worklog = self.app.get_day_worklog(selected_day)
day_total = timedelta(seconds=0)
self.ui.tableDayWorklog.setSortingEnabled(False)
row_count = 0
self.ui.tableDayWorklog.setRowCount(row_count)
for row_num, worklog_entry in enumerate(day_worklog):
row_count += 1
self.ui.tableDayWorklog.setRowCount(row_count)
issue_key_item = QtWidgets.QTableWidgetItem(worklog_entry.issue.key)
issue_key_item.worklog_entry = worklog_entry
self.ui.tableDayWorklog.setItem(row_num,
0,
issue_key_item)
self.ui.tableDayWorklog.setItem(row_num,
1,
QtWidgets.QTableWidgetItem(worklog_entry.issue.summary))
started_item = QtWidgets.QTableWidgetItem(worklog_entry.started.strftime('%H:%M'))
started_item.setTextAlignment(QtCore.Qt.AlignHCenter)
self.ui.tableDayWorklog.setItem(row_num,
2,
started_item)
ended_item = QtWidgets.QTableWidgetItem(worklog_entry.ended.strftime('%H:%M'))
ended_item.setTextAlignment(QtCore.Qt.AlignHCenter)
self.ui.tableDayWorklog.setItem(row_num,
3,
ended_item)
time_spent_item = QtWidgets.QTableWidgetItem(worklog_entry.get_timespent_string())
time_spent_item.setTextAlignment(QtCore.Qt.AlignHCenter)
self.ui.tableDayWorklog.setItem(row_num,
4,
time_spent_item)
day_total += worklog_entry.get_timespent()
if day_total.total_seconds() > 0:
            self.ui.labelDaySummary.setText('Total: ' + utils.get_time_spent_string(day_total))
else:
self.ui.labelDaySummary.clear()
# restore sorting
self.ui.tableDayWorklog.sortByColumn(2, QtCore.Qt.AscendingOrder)
self.ui.tableDayWorklog.setSortingEnabled(True)
# beautifying
self.ui.tableDayWorklog.horizontalHeader().setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)
for column in (0, 2, 3, 4):
self.ui.tableDayWorklog.resizeColumnToContents(column)
            self.ui.tableDayWorklog.horizontalHeader().setSectionResizeMode(
                column, QtWidgets.QHeaderView.Fixed)
def set_issue_selected(self):
if not self.ui.startStopTracking.isChecked():
if not self.ui.tabIssues.isHidden():
try:
issue = self._get_selected_issue_from_table()
self.ui.labelSelectedIssue.setText(issue.key + ': ' + issue.summary)
except NotSelectedException:
                    # No issue selected; leave the selected-issue label unchanged
pass
def online_tracking(self):
if self.ui.startStopTracking.isChecked():
            # Button toggled on: begin tracking
self.start_online_tracking()
else:
self.stop_online_tracking()
def update_timer(self, seconds):
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
time_string = '%02d:%02d:%02d' % (hours, minutes, seconds)
self.ui.labelTimeSpent.setText(time_string)
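    # Example: update_timer(3723) renders '01:02:03'
    # (3723 s = 1 h, 2 min, 3 s via the two divmod steps above).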
def restore_from_tray(self):
self.showNormal()
def hide_to_tray(self):
self.hide()
def tray_click(self, reason):
if reason != QtWidgets.QSystemTrayIcon.Context:
if self.isHidden():
self.restore_from_tray()
else:
self.hide_to_tray()
def changeEvent(self, event):
if event.type() == QtCore.QEvent.WindowStateChange:
if self.windowState() & QtCore.Qt.WindowMinimized:
event.ignore()
self.hide_to_tray()
return
super(MainWindow, self).changeEvent(event)
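# A typical Qt entry point for this window might look like the sketch below
# (an assumption: a no-argument MainWindow constructor, with sys imported at
# module level):
#
#     if __name__ == '__main__':
#         qt_app = QtWidgets.QApplication(sys.argv)
#         window = MainWindow()
#         window.show()
#         sys.exit(qt_app.exec_())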
|
gpl-3.0
|
wilebeast/FireFox-OS
|
B2G/gecko/testing/marionette/client/marionette/keys.py
|
39
|
2581
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Keys(object):
NULL = u'\ue000'
CANCEL = u'\ue001' # ^break
HELP = u'\ue002'
BACK_SPACE = u'\ue003'
TAB = u'\ue004'
CLEAR = u'\ue005'
RETURN = u'\ue006'
ENTER = u'\ue007'
SHIFT = u'\ue008'
LEFT_SHIFT = u'\ue008' # alias
CONTROL = u'\ue009'
LEFT_CONTROL = u'\ue009' # alias
ALT = u'\ue00a'
LEFT_ALT = u'\ue00a' # alias
PAUSE = u'\ue00b'
ESCAPE = u'\ue00c'
SPACE = u'\ue00d'
PAGE_UP = u'\ue00e'
PAGE_DOWN = u'\ue00f'
END = u'\ue010'
HOME = u'\ue011'
LEFT = u'\ue012'
ARROW_LEFT = u'\ue012' # alias
UP = u'\ue013'
ARROW_UP = u'\ue013' # alias
RIGHT = u'\ue014'
ARROW_RIGHT = u'\ue014' # alias
DOWN = u'\ue015'
ARROW_DOWN = u'\ue015' # alias
INSERT = u'\ue016'
DELETE = u'\ue017'
SEMICOLON = u'\ue018'
EQUALS = u'\ue019'
    NUMPAD0 = u'\ue01a'  # number pad keys
NUMPAD1 = u'\ue01b'
NUMPAD2 = u'\ue01c'
NUMPAD3 = u'\ue01d'
NUMPAD4 = u'\ue01e'
NUMPAD5 = u'\ue01f'
NUMPAD6 = u'\ue020'
NUMPAD7 = u'\ue021'
NUMPAD8 = u'\ue022'
NUMPAD9 = u'\ue023'
MULTIPLY = u'\ue024'
ADD = u'\ue025'
SEPARATOR = u'\ue026'
SUBTRACT = u'\ue027'
DECIMAL = u'\ue028'
DIVIDE = u'\ue029'
F1 = u'\ue031' # function keys
F2 = u'\ue032'
F3 = u'\ue033'
F4 = u'\ue034'
F5 = u'\ue035'
F6 = u'\ue036'
F7 = u'\ue037'
F8 = u'\ue038'
F9 = u'\ue039'
F10 = u'\ue03a'
F11 = u'\ue03b'
F12 = u'\ue03c'
META = u'\ue03d'
    COMMAND = u'\ue03d'  # alias
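# Usage sketch: these constants are WebDriver key codes, typically passed to a
# client's send_keys call. For example, with a Marionette session (element id
# is hypothetical):
#
#     element = marionette.find_element('id', 'search')
#     element.send_keys('hello world', Keys.ENTER)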
|
apache-2.0
|