repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
fspaolo/scikit-learn | sklearn/datasets/svmlight_format.py | 2 | 13301 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..utils import atleast2d_or_csr
from ..externals.six import u, b
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text based source can be expensive. When working on
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f: {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features: int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have example of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
return _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
with closing(_gen_open(f)) as f:
return _load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Rationale
---------
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
if n_features is None:
n_features = max(ind[1].max() for ind in r) + 1
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
line_pattern = u("%d")
else:
line_pattern = u("%.16g")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
s = " ".join([value_pattern % (j + one_based, X[i, j])
for j in X[i].nonzero()[is_sp]])
if query_id is not None:
feat = (y[i], query_id[i], s)
else:
feat = (y[i], s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = atleast2d_or_csr(X)
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, one_based, comment, query_id)
| bsd-3-clause |
jsilhan/dnf | dnf/__init__.py | 5 | 1394 | # __init__.py
# The toplevel DNF package.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
import warnings
import dnf.exceptions
import dnf.pycomp
warnings.filterwarnings('once', category=dnf.exceptions.DeprecationWarning)
import dnf.const
__version__ = dnf.const.VERSION
import dnf.base
Base = dnf.base.Base # :api
import dnf.plugin
Plugin = dnf.plugin.Plugin # :api
# setup libraries
dnf.pycomp.urlparse.uses_fragment.append("media")
| gpl-2.0 |
GroestlCoin/encompass | gui/qt/console.py | 4 | 10130 | # source: http://stackoverflow.com/questions/2758159/how-to-embed-a-python-interpreter-in-a-pyqt-widget
import sys, os, re
import traceback, platform
from PyQt4 import QtCore
from PyQt4 import QtGui
from chainkey import util
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class Console(QtGui.QPlainTextEdit):
def __init__(self, prompt='>> ', startup_message='', parent=None):
QtGui.QPlainTextEdit.__init__(self, parent)
self.prompt = prompt
self.history = []
self.namespace = {}
self.construct = []
self.setGeometry(50, 75, 600, 400)
self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
self.setUndoRedoEnabled(False)
self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
self.showMessage(startup_message)
self.updateNamespace({'run':self.run_script})
self.set_json(False)
def set_json(self, b):
self.is_json = b
def run_script(self, filename):
with open(filename) as f:
script = f.read()
# eval is generally considered bad practice. use it wisely!
result = eval(script, self.namespace, self.namespace)
def updateNamespace(self, namespace):
self.namespace.update(namespace)
def showMessage(self, message):
self.appendPlainText(message)
self.newPrompt()
def clear(self):
self.setPlainText('')
self.newPrompt()
def newPrompt(self):
if self.construct:
prompt = '.' * len(self.prompt)
else:
prompt = self.prompt
self.completions_pos = self.textCursor().position()
self.completions_visible = False
self.appendPlainText(prompt)
self.moveCursor(QtGui.QTextCursor.End)
def getCommand(self):
doc = self.document()
curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
curr_line = curr_line.rstrip()
curr_line = curr_line[len(self.prompt):]
return curr_line
def setCommand(self, command):
if self.getCommand() == command:
return
doc = self.document()
curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
self.moveCursor(QtGui.QTextCursor.End)
for i in range(len(curr_line) - len(self.prompt)):
self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().insertText(command)
self.moveCursor(QtGui.QTextCursor.End)
def show_completions(self, completions):
if self.completions_visible:
self.hide_completions()
c = self.textCursor()
c.setPosition(self.completions_pos)
completions = map(lambda x: x.split('.')[-1], completions)
t = '\n' + ' '.join(completions)
if len(t) > 500:
t = t[:500] + '...'
c.insertText(t)
self.completions_end = c.position()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = True
def hide_completions(self):
if not self.completions_visible:
return
c = self.textCursor()
c.setPosition(self.completions_pos)
l = self.completions_end - self.completions_pos
for x in range(l): c.deleteChar()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = False
def getConstruct(self, command):
if self.construct:
prev_command = self.construct[-1]
self.construct.append(command)
if not prev_command and not command:
ret_val = '\n'.join(self.construct)
self.construct = []
return ret_val
else:
return ''
else:
if command and command[-1] == (':'):
self.construct.append(command)
return ''
else:
return command
def getHistory(self):
return self.history
def setHisory(self, history):
self.history = history
def addToHistory(self, command):
if command.find("importprivkey") > -1:
return
if command and (not self.history or self.history[-1] != command):
self.history.append(command)
self.history_index = len(self.history)
def getPrevHistoryEntry(self):
if self.history:
self.history_index = max(0, self.history_index - 1)
return self.history[self.history_index]
return ''
def getNextHistoryEntry(self):
if self.history:
hist_len = len(self.history)
self.history_index = min(hist_len, self.history_index + 1)
if self.history_index < hist_len:
return self.history[self.history_index]
return ''
def getCursorPosition(self):
c = self.textCursor()
return c.position() - c.block().position() - len(self.prompt)
def setCursorPosition(self, position):
self.moveCursor(QtGui.QTextCursor.StartOfLine)
for i in range(len(self.prompt) + position):
self.moveCursor(QtGui.QTextCursor.Right)
def register_command(self, c, func):
methods = { c: func}
self.updateNamespace(methods)
def runCommand(self):
command = self.getCommand()
self.addToHistory(command)
command = self.getConstruct(command)
if command:
tmp_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def flush(self):
pass
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
QtCore.QCoreApplication.processEvents()
self.skip = not self.skip
if type(self.namespace.get(command)) == type(lambda:None):
self.appendPlainText("'%s' is a function. Type '%s()' to use it in the Python console."%(command, command))
self.newPrompt()
return
sys.stdout = stdoutProxy(self.appendPlainText)
try:
try:
# eval is generally considered bad practice. use it wisely!
result = eval(command, self.namespace, self.namespace)
if result != None:
if self.is_json:
util.print_json(result)
else:
self.appendPlainText(repr(result))
except SyntaxError:
# exec is generally considered bad practice. use it wisely!
exec command in self.namespace
except SystemExit:
self.close()
except Exception:
traceback_lines = traceback.format_exc().split('\n')
# Remove traceback mentioning this file, and a linebreak
for i in (3,2,1,-1):
traceback_lines.pop(i)
self.appendPlainText('\n'.join(traceback_lines))
sys.stdout = tmp_stdout
self.newPrompt()
self.set_json(False)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Tab:
self.completions()
return
self.hide_completions()
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.runCommand()
return
if event.key() == QtCore.Qt.Key_Home:
self.setCursorPosition(0)
return
if event.key() == QtCore.Qt.Key_PageUp:
return
elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
if self.getCursorPosition() == 0:
return
elif event.key() == QtCore.Qt.Key_Up:
self.setCommand(self.getPrevHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_Down:
self.setCommand(self.getNextHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
self.clear()
super(Console, self).keyPressEvent(event)
def completions(self):
cmd = self.getCommand()
lastword = re.split(' |\(|\)',cmd)[-1]
beginning = cmd[0:-len(lastword)]
path = lastword.split('.')
ns = self.namespace.keys()
if len(path) == 1:
ns = ns
prefix = ''
else:
obj = self.namespace.get(path[0])
prefix = path[0] + '.'
ns = dir(obj)
completions = []
for x in ns:
if x[0] == '_':continue
xx = prefix + x
if xx.startswith(lastword):
completions.append(xx)
completions.sort()
if not completions:
self.hide_completions()
elif len(completions) == 1:
self.hide_completions()
self.setCommand(beginning + completions[0])
else:
# find common prefix
p = os.path.commonprefix(completions)
if len(p)>len(lastword):
self.hide_completions()
self.setCommand(beginning + p)
else:
self.show_completions(completions)
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
console = Console(startup_message=welcome_message)
console.updateNamespace({'myVar1' : app, 'myVar2' : 1234})
console.show();
sys.exit(app.exec_())
| gpl-3.0 |
lsinfo/odoo | openerp-wsgi.py | 363 | 1811 | # WSGI Handler sample configuration file.
#
# Change the appropriate settings below, in order to provide the parameters
# that would normally be passed in the command-line.
# (at least conf['addons_path'])
#
# For generic wsgi handlers a global application is defined.
# For uwsgi this should work:
# $ uwsgi_python --http :9090 --pythonpath . --wsgi-file openerp-wsgi.py
#
# For gunicorn additional globals need to be defined in the Gunicorn section.
# Then the following command should run:
# $ gunicorn openerp:service.wsgi_server.application -c openerp-wsgi.py
import openerp
#----------------------------------------------------------
# Common
#----------------------------------------------------------
openerp.multi_process = True # Nah!
# Equivalent of --load command-line option
openerp.conf.server_wide_modules = ['web']
conf = openerp.tools.config
# Path to the OpenERP Addons repository (comma-separated for
# multiple locations)
conf['addons_path'] = '../../addons/trunk,../../web/trunk/addons'
# Optional database config if not using local socket
#conf['db_name'] = 'mycompany'
#conf['db_host'] = 'localhost'
#conf['db_user'] = 'foo'
#conf['db_port'] = 5432
#conf['db_password'] = 'secret'
#----------------------------------------------------------
# Generic WSGI handlers application
#----------------------------------------------------------
application = openerp.service.wsgi_server.application
openerp.service.server.load_server_wide_modules()
#----------------------------------------------------------
# Gunicorn
#----------------------------------------------------------
# Standard OpenERP XML-RPC port is 8069
bind = '127.0.0.1:8069'
pidfile = '.gunicorn.pid'
workers = 4
timeout = 240
max_requests = 2000
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
whilo/toolz | doc/source/conf.py | 25 | 9229 | # -*- coding: utf-8 -*-
#
# Toolz documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 22 18:06:00 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Toolz'
copyright = u'2013, Matthew Rocklin, John Jacobsen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import toolz
version = toolz.__version__
# The full version, including alpha/beta/rc tags.
release = toolz.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Toolzdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Toolz.tex', u'Toolz Documentation',
u'Matthew Rocklin, John Jacobsen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'toolz', u'Toolz Documentation',
[u'Matthew Rocklin, John Jacobsen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Toolz', u'Toolz Documentation',
u'Matthew Rocklin, John Jacobsen', 'Toolz', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Toolz'
epub_author = u'Matthew Rocklin, John Jacobsen'
epub_publisher = u'Matthew Rocklin, John Jacobsen'
epub_copyright = u'2013, Matthew Rocklin, John Jacobsen'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| bsd-3-clause |
gpu/CLBlast | scripts/database/database/bests.py | 2 | 2360 |
# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0. This file follows the
# PEP8 Python style guide and uses a max-width of 120 characters per line.
#
# Author(s):
# Cedric Nugteren <www.cedricnugteren.nl>
import sys
import database.clblast as clblast
def get_best_results(database):
"""Retrieves the results with the lowest execution times"""
sections_best = []
for section in database["sections"]:
section_best = {}
# Stores all the section's meta data
for attribute in section.keys():
if attribute != "results":
section_best[attribute] = section[attribute]
if section_best["clblast_device_architecture"] == "" and section_best["clblast_device_vendor"] in clblast.VENDORS_WITH_ARCHITECTURE:
section_best["clblast_device_architecture"] = clblast.DEVICE_ARCHITECTURE_DEFAULT
# Find the best result
parameters_best = None
time_best = sys.float_info.max
for result in section["results"]:
if result["time"] < time_best:
time_best = result["time"]
parameters_best = result["parameters"]
# Stores the best result
section_best["results"] = [{"time": time_best, "parameters": parameters_best}]
sections_best.append(section_best)
return {"sections": sections_best}
def get_relative_bests(name, common_results, common_parameters, verbose=False):
"""Retrieves the parameters with the relative best execution time over different devices"""
# Helper function
def argmin(iterable):
return min(enumerate(iterable), key=lambda x: x[1])[0]
# Computes the sum of the execution times over the different devices
performance_sums = []
for parameters in common_parameters:
performance_sum = sum([r["relative_time"] for r in common_results if r["parameters"] == parameters])
performance_sums.append(performance_sum)
# Retrieves the entry with the lowest time
best_index = argmin(performance_sums)
best_performance = performance_sums[best_index]
best_parameters = common_parameters[best_index]
# Completed, report and return the results
if verbose:
print("[database] " + str(name) + " with performance " + str(best_performance))
return best_parameters
| apache-2.0 |
briancoutinho0905/2dsampling | src/mem/slicc/ast/IfStatementAST.py | 71 | 3124 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
from slicc.symbols import Type
class IfStatementAST(StatementAST):
def __init__(self, slicc, cond, then, else_):
super(IfStatementAST, self).__init__(slicc)
assert cond is not None
assert then is not None
self.cond = cond
self.then = then
self.else_ = else_
def __repr__(self):
return "[IfStatement: %r%r%r]" % (self.cond, self.then, self.else_)
def generate(self, code, return_type):
cond_code = self.slicc.codeFormatter()
cond_type = self.cond.generate(cond_code)
if cond_type != self.symtab.find("bool", Type):
self.cond.error("Condition of if stmt must be bool, type was '%s'",
cond_type)
# Conditional
code.indent()
code('if ($cond_code) {')
# Then part
code.indent()
self.symtab.pushFrame()
self.then.generate(code, return_type)
self.symtab.popFrame()
code.dedent()
# Else part
if self.else_:
code('} else {')
code.indent()
self.symtab.pushFrame()
self.else_.generate(code, return_type)
self.symtab.popFrame()
code.dedent()
code('}') # End scope
def findResources(self, resources):
# Take a worse case look at both paths
self.then.findResources(resources)
if self.else_ is not None:
self.else_.findResources(resources)
| bsd-3-clause |
aranjan7/contrail-controller-aranjan | src/config/common/network.py | 19 | 1506 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from quantumclient.quantum import client
from quantumclient.client import HTTPClient
from quantumclient.common import exceptions
class CommonQuantumClient(object):
def __init__(self, project, user, passwd, api_server_ip):
AUTH_URL = 'http://%s:5000/v2.0' % (api_server_ip)
httpclient = HTTPClient(username=user, tenant_name=project,
password=passwd, auth_url=AUTH_URL)
httpclient.authenticate()
OS_URL = 'http://%s:9696/' % (api_server_ip)
OS_TOKEN = httpclient.auth_token
self._quantum = client.Client('2.0', endpoint_url=OS_URL,
token=OS_TOKEN)
# end __init__
def common_create_vn(self, vn_name):
print "Creating network %s" % (vn_name)
net_req = {'name': '%s' % (vn_name)}
net_rsp = self._quantum.create_network({'network': net_req})
net_uuid = net_rsp['network']['id']
return net_rsp
# end common_create_vn
def common_create_subnet(self, net_id, cidr, ipam_fq_name):
subnet_req = {'network_id': net_id,
'cidr': cidr,
'ip_version': 4,
'contrail:ipam_fq_name': ipam_fq_name}
subnet_rsp = self._quantum.create_subnet({'subnet': subnet_req})
print 'Response for create_subnet : ' + repr(subnet_rsp)
# end _common_create_subnet
# end class CommonQuantumClient
| apache-2.0 |
vheon/ycmd | ycmd/handlers.py | 2 | 13157 | # Copyright (C) 2013-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import bottle
import json
import platform
import sys
import time
import traceback
from bottle import request
from ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store
from ycmd.responses import ( BuildExceptionResponse,
BuildCompletionResponse,
BuildResolveCompletionResponse,
BuildSignatureHelpResponse,
BuildSignatureHelpAvailableResponse,
SignatureHelpAvailalability,
UnknownExtraConf )
from ycmd.request_wrap import RequestWrap
from ycmd.completers.completer_utils import FilterAndSortCandidatesWrap
from ycmd.utils import LOGGER, StartThread, ImportCore
ycm_core = ImportCore()
# num bytes for the request body buffer; request.json only works if the request
# size is less than this
bottle.Request.MEMFILE_MAX = 10 * 1024 * 1024
_server_state = None
_hmac_secret = bytes()
app = bottle.Bottle()
wsgi_server = None
@app.post( '/event_notification' )
def EventNotification():
LOGGER.info( 'Received event notification' )
request_data = RequestWrap( request.json )
event_name = request_data[ 'event_name' ]
LOGGER.debug( 'Event name: %s', event_name )
event_handler = 'On' + event_name
getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
filetypes = request_data[ 'filetypes' ]
response_data = None
if _server_state.FiletypeCompletionUsable( filetypes ):
response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
event_handler )( request_data )
if response_data:
return _JsonResponse( response_data )
return _JsonResponse( {} )
@app.get( '/signature_help_available' )
def GetSignatureHelpAvailable():
LOGGER.info( 'Received signature help available request' )
if request.query.subserver:
filetype = request.query.subserver
try:
completer = _server_state.GetFiletypeCompleter( [ filetype ] )
except ValueError:
return _JsonResponse( BuildSignatureHelpAvailableResponse(
SignatureHelpAvailalability.NOT_AVAILABLE ) )
value = completer.SignatureHelpAvailable()
return _JsonResponse( BuildSignatureHelpAvailableResponse( value ) )
else:
raise RuntimeError( 'Subserver not specified' )
@app.post( '/run_completer_command' )
def RunCompleterCommand():
LOGGER.info( 'Received command request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.OnUserCommand(
request_data[ 'command_arguments' ],
request_data ) )
@app.post( '/resolve_fixit' )
def ResolveFixit():
LOGGER.info( 'Received resolve_fixit request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.ResolveFixit( request_data ) )
@app.post( '/completions' )
def GetCompletions():
LOGGER.info( 'Received completion request' )
request_data = RequestWrap( request.json )
do_filetype_completion = _server_state.ShouldUseFiletypeCompleter(
request_data )
LOGGER.debug( 'Using filetype completion: %s', do_filetype_completion )
errors = None
completions = None
if do_filetype_completion:
try:
filetype_completer = _server_state.GetFiletypeCompleter(
request_data[ 'filetypes' ] )
completions = filetype_completer.ComputeCandidates( request_data )
except Exception as exception:
if request_data[ 'force_semantic' ]:
# user explicitly asked for semantic completion, so just pass the error
# back
raise
# store the error to be returned with results from the identifier
# completer
LOGGER.exception( 'Exception from semantic completer (using general)' )
stack = traceback.format_exc()
errors = [ BuildExceptionResponse( exception, stack ) ]
if not completions and not request_data[ 'force_semantic' ]:
completions = _server_state.GetGeneralCompleter().ComputeCandidates(
request_data )
return _JsonResponse(
BuildCompletionResponse( completions if completions else [],
request_data[ 'start_column' ],
errors = errors ) )
@app.post( '/resolve_completion' )
def ResolveCompletionItem():
LOGGER.info( "Received resolve request" )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
errors = None
completion = None
try:
completion = completer.ResolveCompletionItem( request_data )
except Exception as e:
errors = [ BuildExceptionResponse( e, traceback.format_exc() ) ]
return _JsonResponse( BuildResolveCompletionResponse( completion, errors ) )
@app.post( '/signature_help' )
def GetSignatureHelp():
LOGGER.info( 'Received signature help request' )
request_data = RequestWrap( request.json )
if not _server_state.FiletypeCompletionUsable( request_data[ 'filetypes' ],
silent = True ):
return _JsonResponse( BuildSignatureHelpResponse( None ) )
errors = None
signature_info = None
try:
filetype_completer = _server_state.GetFiletypeCompleter(
request_data[ 'filetypes' ] )
signature_info = filetype_completer.ComputeSignatures( request_data )
except Exception as exception:
LOGGER.exception( 'Exception from semantic completer during sig help' )
errors = [ BuildExceptionResponse( exception, traceback.format_exc() ) ]
# No fallback for signature help. The general completer is unlikely to be able
# to offer anything of for that here.
return _JsonResponse(
BuildSignatureHelpResponse( signature_info, errors = errors ) )
@app.post( '/filter_and_sort_candidates' )
def FilterAndSortCandidates():
LOGGER.info( 'Received filter & sort request' )
# Not using RequestWrap because no need and the requests coming in aren't like
# the usual requests we handle.
request_data = request.json
return _JsonResponse( FilterAndSortCandidatesWrap(
request_data[ 'candidates' ],
request_data[ 'sort_property' ],
request_data[ 'query' ],
_server_state.user_options[ 'max_num_candidates' ] ) )
@app.get( '/healthy' )
def GetHealthy():
LOGGER.info( 'Received health request' )
if request.query.subserver:
filetype = request.query.subserver
completer = _server_state.GetFiletypeCompleter( [ filetype ] )
return _JsonResponse( completer.ServerIsHealthy() )
return _JsonResponse( True )
@app.get( '/ready' )
def GetReady():
LOGGER.info( 'Received ready request' )
if request.query.subserver:
filetype = request.query.subserver
completer = _server_state.GetFiletypeCompleter( [ filetype ] )
return _JsonResponse( completer.ServerIsReady() )
return _JsonResponse( True )
@app.post( '/semantic_completion_available' )
def FiletypeCompletionAvailable():
LOGGER.info( 'Received filetype completion available request' )
return _JsonResponse( _server_state.FiletypeCompletionAvailable(
RequestWrap( request.json )[ 'filetypes' ] ) )
@app.post( '/defined_subcommands' )
def DefinedSubcommands():
LOGGER.info( 'Received defined subcommands request' )
completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
return _JsonResponse( completer.DefinedSubcommands() )
@app.post( '/detailed_diagnostic' )
def GetDetailedDiagnostic():
LOGGER.info( 'Received detailed diagnostic request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
@app.post( '/load_extra_conf_file' )
def LoadExtraConfFile():
LOGGER.info( 'Received extra conf load request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
return _JsonResponse( True )
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
LOGGER.info( 'Received extra conf ignore request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
return _JsonResponse( True )
@app.post( '/debug_info' )
def DebugInfo():
LOGGER.info( 'Received debug info request' )
request_data = RequestWrap( request.json )
has_clang_support = ycm_core.HasClangSupport()
clang_version = ycm_core.ClangVersion() if has_clang_support else None
filepath = request_data[ 'filepath' ]
try:
extra_conf_path = extra_conf_store.ModuleFileForSourceFile( filepath )
is_loaded = bool( extra_conf_path )
except UnknownExtraConf as error:
extra_conf_path = error.extra_conf_file
is_loaded = False
response = {
'python': {
'executable': sys.executable,
'version': platform.python_version()
},
'clang': {
'has_support': has_clang_support,
'version': clang_version
},
'extra_conf': {
'path': extra_conf_path,
'is_loaded': is_loaded
},
'completer': None
}
try:
response[ 'completer' ] = _GetCompleterForRequestData(
request_data ).DebugInfo( request_data )
except Exception:
LOGGER.exception( 'Error retrieving completer debug info' )
return _JsonResponse( response )
@app.post( '/shutdown' )
def Shutdown():
LOGGER.info( 'Received shutdown request' )
ServerShutdown()
return _JsonResponse( True )
@app.post( '/receive_messages' )
def ReceiveMessages():
# Receive messages is a "long-poll" handler.
# The client makes the request with a long timeout (1 hour).
# When we have data to send, we send it and close the socket.
# The client then sends a new request.
request_data = RequestWrap( request.json )
try:
completer = _GetCompleterForRequestData( request_data )
except Exception:
# No semantic completer for this filetype, don't requery. This is not an
# error.
return _JsonResponse( False )
return _JsonResponse( completer.PollForMessages( request_data ) )
# The type of the param is Bottle.HTTPError
def ErrorHandler( httperror ):
body = _JsonResponse( BuildExceptionResponse( httperror.exception,
httperror.traceback ) )
hmac_plugin.SetHmacHeader( body, _hmac_secret )
return body
# For every error Bottle encounters it will use this as the default handler
app.default_error_handler = ErrorHandler
def _JsonResponse( data ):
bottle.response.set_header( 'Content-Type', 'application/json' )
return json.dumps( data,
separators = ( ',', ':' ),
default = _UniversalSerialize )
def _UniversalSerialize( obj ):
try:
serialized = obj.__dict__.copy()
serialized[ 'TYPE' ] = type( obj ).__name__
return serialized
except AttributeError:
return str( obj )
def _GetCompleterForRequestData( request_data ):
completer_target = request_data.get( 'completer_target', None )
if completer_target == 'identifier':
return _server_state.GetGeneralCompleter().GetIdentifierCompleter()
elif completer_target == 'filetype_default' or not completer_target:
return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
else:
return _server_state.GetFiletypeCompleter( [ completer_target ] )
def ServerShutdown():
def Terminator():
if wsgi_server:
wsgi_server.Shutdown()
# Use a separate thread to let the server send the response before shutting
# down.
StartThread( Terminator )
def ServerCleanup():
if _server_state:
_server_state.Shutdown()
extra_conf_store.Shutdown()
def SetHmacSecret( hmac_secret ):
global _hmac_secret
_hmac_secret = hmac_secret
def UpdateUserOptions( options ):
global _server_state
if not options:
return
# This should never be passed in, but let's try to remove it just in case.
options.pop( 'hmac_secret', None )
user_options_store.SetAll( options )
_server_state = server_state.ServerState( options )
def KeepSubserversAlive( check_interval_seconds ):
def Keepalive( check_interval_seconds ):
while True:
time.sleep( check_interval_seconds )
LOGGER.debug( 'Keeping subservers alive' )
loaded_completers = _server_state.GetLoadedFiletypeCompleters()
for completer in loaded_completers:
completer.ServerIsHealthy()
StartThread( Keepalive, check_interval_seconds )
| gpl-3.0 |
ardi69/pyload-0.4.10 | pyload/plugin/account/FilerNet.py | 1 | 1976 | # -*- coding: utf-8 -*-
import re
import time
from pyload.plugin.Account import Account
class FilerNet(Account):
__name = "FilerNet"
__type = "account"
__version = "0.04"
__description = """Filer.net account plugin"""
__license = "GPLv3"
__authors = [("stickell", "[email protected]")]
TOKEN_PATTERN = r'_csrf_token" value="(.+?)" />'
WALID_UNTIL_PATTERN = r'Der Premium-Zugang ist gültig bis (.+)\.\s*</td>'
TRAFFIC_PATTERN = r'Traffic</th>\s*<td>([^<]+)</td>'
FREE_PATTERN = r'Account Status</th>\s*<td>\s*Free'
def loadAccountInfo(self, user, req):
html = req.load("https://filer.net/profile")
# Free user
if re.search(self.FREE_PATTERN, html):
return {"premium": False, "validuntil": None, "trafficleft": None}
until = re.search(self.WALID_UNTIL_PATTERN, html)
traffic = re.search(self.TRAFFIC_PATTERN, html)
if until and traffic:
validuntil = time.mktime(time.strptime(until.group(1), "%d.%m.%Y %H:%M:%S"))
trafficleft = self.parseTraffic(traffic.group(1))
return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
else:
self.logError(_("Unable to retrieve account information"))
return {"premium": False, "validuntil": None, "trafficleft": None}
def login(self, user, data, req):
html = req.load("https://filer.net/login")
token = re.search(self.TOKEN_PATTERN, html).group(1)
html = req.load("https://filer.net/login_check",
post={"_username": user,
"_password": data['password'],
"_remember_me": "on",
"_csrf_token": token,
"_target_path": "https://filer.net/"},
decode=True)
if 'Logout' not in html:
self.wrongPassword()
| gpl-3.0 |
zorojean/scrapy | tests/test_mail.py | 129 | 2331 | import unittest
from io import BytesIO
from scrapy.mail import MailSender
class MailSenderTest(unittest.TestCase):
def test_send(self):
mailsender = MailSender(debug=True)
mailsender.send(to=['[email protected]'], subject='subject', body='body', _callback=self._catch_mail_sent)
assert self.catched_msg
self.assertEqual(self.catched_msg['to'], ['[email protected]'])
self.assertEqual(self.catched_msg['subject'], 'subject')
self.assertEqual(self.catched_msg['body'], 'body')
msg = self.catched_msg['msg']
self.assertEqual(msg['to'], '[email protected]')
self.assertEqual(msg['subject'], 'subject')
self.assertEqual(msg.get_payload(), 'body')
self.assertEqual(msg.get('Content-Type'), 'text/plain')
def test_send_html(self):
mailsender = MailSender(debug=True)
mailsender.send(to=['[email protected]'], subject='subject', body='<p>body</p>', mimetype='text/html', _callback=self._catch_mail_sent)
msg = self.catched_msg['msg']
self.assertEqual(msg.get_payload(), '<p>body</p>')
self.assertEqual(msg.get('Content-Type'), 'text/html')
def test_send_attach(self):
attach = BytesIO()
attach.write(b'content')
attach.seek(0)
attachs = [('attachment', 'text/plain', attach)]
mailsender = MailSender(debug=True)
mailsender.send(to=['[email protected]'], subject='subject', body='body',
attachs=attachs, _callback=self._catch_mail_sent)
assert self.catched_msg
self.assertEqual(self.catched_msg['to'], ['[email protected]'])
self.assertEqual(self.catched_msg['subject'], 'subject')
self.assertEqual(self.catched_msg['body'], 'body')
msg = self.catched_msg['msg']
self.assertEqual(msg['to'], '[email protected]')
self.assertEqual(msg['subject'], 'subject')
payload = msg.get_payload()
assert isinstance(payload, list)
self.assertEqual(len(payload), 2)
text, attach = payload
self.assertEqual(text.get_payload(decode=True), 'body')
self.assertEqual(attach.get_payload(decode=True), 'content')
def _catch_mail_sent(self, **kwargs):
self.catched_msg = dict(**kwargs)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
asiviero/brbeerindex | beerindex/spiders/winespider.py | 1 | 8963 | # -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from scrapy.conf import settings
from beerindex.items import WineIndexItem
import logging
import lxml.html
from urlparse import urlparse
import re
class WineSpider(Spider):
name = "winespider"
beer_sites = {
'www.wine.com.br':
{
"start_url" : 'https://www.wine.com.br/browse.ep?cID=100851',
"next_link" : '.navegacaoListagem li.prox a::attr(href)',
"product_link" : '#listagemProdutos ul li a[itemprop="name url"]::attr("href")',
"xpath_name" : "//h1[@itemprop='name']//text()",
"xpath_winetype" : "//div[@class='tipoVinho']//p/text()",
"xpath_volume" : "//div[@class='dadosAvancados']//*[@itemprop='weight']/*[@itemprop='value']/text()",
"xpath_grape" : "//div[@class='dadosAvancados']//li[contains(.,'Uva')]//*[contains(@class,'valor')]/text()",
"xpath_alcohol" : u"//div[@class='dadosAvancados']//li[contains(.,'Teor Alcoólico')]//*[contains(@class,'valor')]/text()",
"xpath_country" : "//div[@id='boxProduto']//div[@class='imgPais']/p//text()",
"xpath_region" : u"//div[@class='dadosAvancados']//li[contains(.,'Região')]//*[contains(@class,'valor')]/text()",
"xpath_winery" : u"//div[@class='dadosAvancados']//li[contains(.,'Vinícola')]//*[contains(@class,'valor')]/text()",
"xpath_price" : "//div[@id='boxProduto']//div[@class='boxPreco']//*[@itemprop='price']//text()",
},
'www.grandcru.com.br':
{
"start_url" : 'http://www.grandcru.com.br/vinhos.html',
"next_link" : '.pages li a.next::attr(href)',
"product_link" : '.products-list h2.product-name a::attr("href")',
"xpath_name" : "//h1[@class='product-name']//text()",
"xpath_winetype" : "//table[contains(@class,'tbl-caracteristicas')]//tr[contains(.,'Tipo de Vinho')]/td/text()",
"xpath_volume" : "//div[@class='dadosAvancados']//*[@itemprop='weight']/*[@itemprop='value']/text()",
"xpath_grape" : "//table[contains(@class,'tbl-caracteristicas')]//tr[contains(.,'Uva')]/td/text()",
"xpath_alcohol" : u"//table[contains(@class,'tbl-caracteristicas')]//tr[contains(.,'Graduação Alcoólica')]/td/text()",
"xpath_country" : u"//table[contains(@class,'tbl-caracteristicas')]//tr[contains(.,'País')]/td/text()",
"xpath_region" : u"//table[contains(@class,'tbl-caracteristicas')]//tr[contains(.,'Região Produtora')]/td/text()",
"xpath_winery" : u"//table[contains(@class,'tbl-caracteristicas')]//tr[contains(.,'Produtor')]/td/text()",
"xpath_price" : "//div[@class='produto_dados']//div[@class='price-box']//*[@itemprop='price']//text()",
},
'www.sommeliervinhos.com.br' :
{
"start_url" : 'http://www.sommeliervinhos.com.br/galeria.php?categoria=2',
"next_link" : '.paginacao a.bt-proxima::attr(href)',
"product_link" : '.lista-produtos li a::attr("href")',
"xpath_name" : "//h1[@class='tit-paginas-h1']//text()",
"xpath_winetype" : "//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'Tipo')]/text()",
"xpath_volume" : "//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'Volume')]/text()",
"xpath_grape" : "//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'Uva')]/text()",
"xpath_alcohol" : u"//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'Alcool')]/text()",
"xpath_country" : u"//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'País')]/text()",
"xpath_region" : u"//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'Região')]/text()",
"xpath_winery" : u"//div[contains(@class,'prod-caracteristicas')]//li[contains(.,'Produtor')]/text()",
"xpath_price" : "//div[@id='principal']//div[@class='valores-produtos']//*[@class='preco-atual']//text()",
},
'www.vinomundi.com.br':
{
"start_url" : 'http://www.vinomundi.com.br/vinhos',
"next_link" : '.pages li a.next::attr(href)',
"product_link" : 'ul.products-grid li.item h2 a::attr("href")',
"xpath_name" : "//*[@class='product-shop']//h1//text()",
"xpath_winetype" : "//table[contains(@class,'data-table')]//tr[contains(.,'Tipo')]/td/text()",
"xpath_volume" : "//table[contains(@class,'data-table')]//tr[contains(.,'Volume')]/td/text()",
"xpath_grape" : "//table[contains(@class,'data-table')]//tr[contains(.,'Uva')]/td/text()",
"xpath_alcohol" : u"//table[contains(@class,'data-table')]//tr[contains(.,'Alcool')]/td/text()",
"xpath_country" : u"//table[contains(@class,'data-table')]//tr[contains(.,'País')]/td/text()",
"xpath_region" : u"//table[contains(@class,'data-table')]//tr[contains(.,'Região')]/td/text()",
"xpath_winery" : u"//table[contains(@class,'data-table')]//tr[contains(.,'Produtor')]/td/text()",
"xpath_price" : "//div[@class='product-shop']//div[@class='price-box']//*[@class='preco-produto-valor']//@value",
},
# 'www.vinhosweb.com.br':
# {
# "start_url" : 'http://www.vinhosweb.com.br/vinhos/vinhos',
# "next_link" : '.pages li a.next::attr(href)',
# "product_link" : 'ul.products-grid li.item h2 a::attr("href")',
# "xpath_name" : "//*[@class='product-shop']//h1//text()",
# "xpath_winetype" : "//table[contains(@class,'data-table')]//tr[contains(.,'Tipo')]/td/text()",
# "xpath_volume" : "//table[contains(@class,'data-table')]//tr[contains(.,'Volume')]/td/text()",
# "xpath_grape" : "//table[contains(@class,'data-table')]//tr[contains(.,'Uva')]/td/text()",
# "xpath_alcohol" : u"//table[contains(@class,'data-table')]//tr[contains(.,'Alcool')]/td/text()",
# "xpath_country" : u"//table[contains(@class,'data-table')]//tr[contains(.,'País')]/td/text()",
# "xpath_region" : u"//table[contains(@class,'data-table')]//tr[contains(.,'Região')]/td/text()",
# "xpath_winery" : u"//table[contains(@class,'data-table')]//tr[contains(.,'Produtor')]/td/text()",
# "xpath_price" : "//div[@class='product-shop']//div[@class='price-box']//*[@class='preco-produto-valor']//@value",
# }
}
def domain_from_url(self,url):
parsed = urlparse(url)
return parsed.netloc
#allowed_domains = ["www.cervejastore.com.br"]
# start_urls = ['http://www.mundodascervejas.com/buscar?q=cerveja']
# start_urls = ["http://www.emporioveredas.com.br/cervejas-importadas.html"]
start_urls = [beer_sites[store]["start_url"] for store in beer_sites]
def parse(self,response):
domain = self.domain_from_url(response.url)
for url in response.css(self.beer_sites[domain]["next_link"]).extract():
request = Request(response.urljoin(url.strip()), self.parse)
yield request
titles = response.css(self.beer_sites[domain]["product_link"]).extract()
for title in titles:
yield Request(response.urljoin(title), self.parse_product)
def parse_product(self,response):
domain = self.domain_from_url(response.url)
item = WineIndexItem()
item["name"] = response.xpath(self.beer_sites[domain]["xpath_name"]).extract_first().strip("\t\n\r")
item["winetype"] = response.xpath(self.beer_sites[domain]["xpath_winetype"]).extract_first().strip("\t\n\r")
try:
item["volume"] = response.xpath(self.beer_sites[domain]["xpath_volume"]).extract_first().strip("\t\n\r")
except AttributeError:
pass
try:
item["grape"] = response.xpath(self.beer_sites[domain]["xpath_grape"]).extract_first().strip("\t\n\r")
except AttributeError:
pass
try:
item["alcohol"] = response.xpath(self.beer_sites[domain]["xpath_alcohol"]).extract_first()
except AttributeError:
pass
item["country"] = response.xpath(self.beer_sites[domain]["xpath_country"]).extract_first().strip("\t\n\r")
item["region"] = response.xpath(self.beer_sites[domain]["xpath_region"]).extract_first()
item["winery"] = response.xpath(self.beer_sites[domain]["xpath_winery"]).extract_first()
item["link"] = response.url
item["price"] = "".join(response.xpath(self.beer_sites[domain]["xpath_price"]).extract())
item["price"] = re.sub(r"\s+", "", item["price"], flags=re.UNICODE)
item["price"] = re.sub(r"[^\d,\.]*", "", item["price"], flags=re.UNICODE)
item["price"] = re.sub(r",", ".", item["price"], flags=re.UNICODE)
yield item
| lgpl-2.1 |
wndhydrnt/airflow | tests/macros/test_hive.py | 15 | 1957 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
import unittest
from airflow.macros import hive
class Hive(unittest.TestCase):
def test_closest_ds_partition(self):
d1 = datetime.strptime('2017-04-24', '%Y-%m-%d')
d2 = datetime.strptime('2017-04-25', '%Y-%m-%d')
d3 = datetime.strptime('2017-04-26', '%Y-%m-%d')
d4 = datetime.strptime('2017-04-28', '%Y-%m-%d')
d5 = datetime.strptime('2017-04-29', '%Y-%m-%d')
target_dt = datetime.strptime('2017-04-27', '%Y-%m-%d')
date_list = [d1, d2, d3, d4, d5]
self.assertEquals("2017-04-26", str(hive._closest_date(target_dt, date_list, True)))
self.assertEquals("2017-04-28", str(hive._closest_date(target_dt, date_list, False)))
# when before is not set, the closest date should be returned
self.assertEquals("2017-04-26", str(hive._closest_date(target_dt, [d1, d2, d3, d5], None)))
self.assertEquals("2017-04-28", str(hive._closest_date(target_dt, [d1, d2, d4, d5])))
self.assertEquals("2017-04-26", str(hive._closest_date(target_dt, date_list)))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
broferek/ansible | lib/ansible/modules/cloud/openstack/os_image_info.py | 20 | 5933 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: os_image_info
short_description: Retrieve information about an image within OpenStack.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
- Retrieve information about a image image from OpenStack.
- This module was called C(os_image_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(os_image_info) module no longer returns C(ansible_facts)!
requirements:
- "python >= 2.7"
- "openstacksdk"
options:
image:
description:
- Name or ID of the image
required: false
properties:
description:
- Dict of properties of the images used for query
type: dict
required: false
version_added: '2.9'
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
- name: Gather information about a previously created image named image1
os_image_info:
auth:
auth_url: https://identity.example.com
username: user
password: password
project_name: someproject
image: image1
register: result
- name: Show openstack information
debug:
msg: "{{ result.openstack_image }}"
# Show all available Openstack images
- name: Retrieve all available Openstack images
os_image_info:
register: result
- name: Show images
debug:
msg: "{{ result.openstack_image }}"
# Show images matching requested properties
- name: Retrieve images having properties with desired values
os_image_facts:
properties:
some_property: some_value
OtherProp: OtherVal
- name: Show images
debug:
msg: "{{ result.openstack_image }}"
'''
RETURN = '''
openstack_image:
description: has all the openstack information about the image
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: str
name:
description: Name given to the image.
returned: success
type: str
status:
description: Image status.
returned: success
type: str
created_at:
description: Image created at timestamp.
returned: success
type: str
deleted:
description: Image deleted flag.
returned: success
type: bool
container_format:
description: Container format of the image.
returned: success
type: str
min_ram:
description: Min amount of RAM required for this image.
returned: success
type: int
disk_format:
description: Disk format of the image.
returned: success
type: str
updated_at:
description: Image updated at timestamp.
returned: success
type: str
properties:
description: Additional properties associated with the image.
returned: success
type: dict
min_disk:
description: Min amount of disk space required for this image.
returned: success
type: int
protected:
description: Image protected flag.
returned: success
type: bool
checksum:
description: Checksum for the image.
returned: success
type: str
owner:
description: Owner for the image.
returned: success
type: str
is_public:
description: Is public flag of the image.
returned: success
type: bool
deleted_at:
description: Image deleted at timestamp.
returned: success
type: str
size:
description: Size of the image.
returned: success
type: int
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def main():
argument_spec = openstack_full_argument_spec(
image=dict(required=False),
properties=dict(default=None, type='dict'),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
is_old_facts = module._name == 'os_image_facts'
if is_old_facts:
module.deprecate("The 'os_image_facts' module has been renamed to 'os_image_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
sdk, cloud = openstack_cloud_from_module(module)
try:
if module.params['image']:
image = cloud.get_image(module.params['image'])
if is_old_facts:
module.exit_json(changed=False, ansible_facts=dict(
openstack_image=image))
else:
module.exit_json(changed=False, openstack_image=image)
else:
images = cloud.search_images(filters=module.params['properties'])
if is_old_facts:
module.exit_json(changed=False, ansible_facts=dict(
openstack_image=images))
else:
module.exit_json(changed=False, openstack_image=images)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
40223204/w16b_test | static/Brython3.1.3-20150514-095342/Lib/test/test_re.py | 718 | 56009 | # FIXME: brython: implement test.support
#from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \
# cpython_only
verbose = True
# FIXME: brython: Not used in this module ?
#import io
import re
# FIXME: brython: implement re.Scanner
#from re import Scanner
import sre_constants
import sys
import string
import traceback
# FIXME: brython: implement _weakref
#from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
# FIXME: brython: implement test.support
# def test_keep_buffer(self):
# # See bug 14212
# b = bytearray(b'x')
# it = re.finditer(b'a', b)
# with self.assertRaises(BufferError):
# b.extend(b'x'*400)
# list(it)
# del it
# gc_collect()
# b.extend(b'x'*400)
# FIXME: brython: implement _weakref
# def test_weakref(self):
# s = 'QabbbcR'
# x = re.compile('ab+c')
# y = proxy(x)
# self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
def test_basic_re_sub(self):
self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
'9.3 -3 24x100y')
self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
'9.3 -3 23x99y')
self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
'\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
(chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
def test_sub_template_numeric_escape(self):
# bug 776311 and friends
self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
# in python2.3 (etc), these loop endlessly in sre_parser.py
self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
'xz8')
self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
def test_bug_462270(self):
# Test for empty sub() behaviour, see SF bug #462270
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_groups(self):
re.compile('(?P<a>x)(?P=a)(?(a)y)')
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
self.assertRaises(re.error, re.compile, '(?Px)')
self.assertRaises(re.error, re.compile, '(?P=)')
self.assertRaises(re.error, re.compile, '(?P=1)')
self.assertRaises(re.error, re.compile, '(?P=a)')
self.assertRaises(re.error, re.compile, '(?P=a1)')
self.assertRaises(re.error, re.compile, '(?P=a.)')
self.assertRaises(re.error, re.compile, '(?P<)')
self.assertRaises(re.error, re.compile, '(?P<>)')
self.assertRaises(re.error, re.compile, '(?P<1>)')
self.assertRaises(re.error, re.compile, '(?P<a.>)')
self.assertRaises(re.error, re.compile, '(?())')
self.assertRaises(re.error, re.compile, '(?(a))')
self.assertRaises(re.error, re.compile, '(?(1a))')
self.assertRaises(re.error, re.compile, '(?(a.))')
# New valid/invalid identifiers in Python 3
re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
self.assertRaises(re.error, re.compile, '(?P<©>x)')
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
# New valid/invalid identifiers in Python 3
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:*)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
self.assertEqual(re.split("(:)*", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
['', None, ':', 'a', None, ':', '', 'b', None, '',
None, '::', 'c'])
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
self.assertEqual(re.split("(:*)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
(":", ":"),
(":", "::")])
def test_bug_117612(self):
self.assertEqual(re.findall(r"(a|(b))", "aba"),
[("a", ""),("b", "b"),("a", "")])
def test_re_match(self):
self.assertEqual(re.match('a', 'a').groups(), ())
self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
pat = re.compile('((a)|(b))(c)?')
self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
# A single group
m = re.match('(a)', 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(0), 'a')
self.assertEqual(m.group(1), 'a')
self.assertEqual(m.group(1, 1), ('a', 'a'))
pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
(None, 'b', None))
self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
def test_re_groupref_exists(self):
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
('(', 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
('a', 'b'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
(None, 'd'))
self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
('a', ''))
# Tests for bug #1177831: exercise groups other than the first group
p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
self.assertEqual(p.match('abc').groups(),
('a', 'b', 'c'))
self.assertEqual(p.match('ad').groups(),
('a', None, 'd'))
self.assertEqual(p.match('abd'), None)
self.assertEqual(p.match('ac'), None)
def test_re_groupref(self):
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
('|', 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
(None, 'a'))
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
('a', 'a'))
self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
(None, None))
def test_groupdict(self):
self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
'first second').groupdict(),
{'first':'first', 'second':'second'})
def test_expand(self):
self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
"first second")
.expand(r"\2 \1 \g<second> \g<first>"),
"second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
def test_getattr(self):
self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
{'first': 1, 'other': 2})
self.assertEqual(re.match("(a)", "a").pos, 0)
self.assertEqual(re.match("(a)", "a").endpos, 1)
self.assertEqual(re.match("(a)", "a").string, "a")
self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
self.assertNotEqual(re.match("(a)", "a").re, None)
def test_special_escapes(self):
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.LOCALE).group(1), "bx")
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd", re.UNICODE).group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\b(b.)\b",
"abcd abc bcd bx").group(1), "bx")
self.assertEqual(re.search(r"\B(b.)\B",
"abc bcd bc abxd").group(1), "bx")
self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a").group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.LOCALE).group(0), "1aa! a")
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
def test_string_boundaries(self):
# See http://bugs.python.org/issue10713
self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
"abc")
# There's a word boundary at the start of a string.
self.assertTrue(re.match(r"\b", "abc"))
# A non-empty string includes a non-boundary zero-length match.
self.assertTrue(re.search(r"\B", "abc"))
# There is no non-boundary match at the start of a string.
self.assertFalse(re.match(r"\B", "abc"))
# However, an empty string contains no word boundaries, and also no
# non-boundaries.
self.assertEqual(re.search(r"\B", ""), None)
# This one is questionable and different from the perlre behaviour,
# but describes current behavior.
self.assertEqual(re.search(r"\b", ""), None)
# A single word-character string has two boundaries, but no
# non-boundary gaps.
self.assertEqual(len(re.findall(r"\b", "a")), 2)
self.assertEqual(len(re.findall(r"\B", "a")), 0)
# If there are no words, there are no boundaries
self.assertEqual(len(re.findall(r"\b", " ")), 0)
self.assertEqual(len(re.findall(r"\b", " ")), 0)
# Can match around the whitespace.
self.assertEqual(len(re.findall(r"\B", " ")), 2)
def test_bigcharset(self):
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222").group(1), "\u2222")
self.assertEqual(re.match("([\u2222\u2223])",
"\u2222", re.UNICODE).group(1), "\u2222")
def test_big_codesize(self):
# Issue #1160
r = re.compile('|'.join(('%d'%x for x in range(10000))))
self.assertIsNotNone(r.match('1000'))
self.assertIsNotNone(r.match('9999'))
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
"a\nb")
self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
"a\n\nb")
def test_non_consuming(self):
self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
def test_ignore_case(self):
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
def test_category(self):
self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
def test_getlower(self):
import _sre
self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
def test_not_literal(self):
self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
def test_search_coverage(self):
self.assertEqual(re.search("\s(b)", " b").group(1), "b")
self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def assertMatch(self, pattern, text, match=None, span=None,
matcher=re.match):
if match is None and span is None:
# the pattern matches the whole text
match = text
span = (0, len(text))
elif match is None or span is None:
raise ValueError('If match is not None, span should be specified '
'(and vice versa).')
m = matcher(pattern, text)
self.assertTrue(m)
self.assertEqual(m.group(), match)
self.assertEqual(m.span(), span)
def test_re_escape(self):
alnum_chars = string.ascii_letters + string.digits + '_'
p = ''.join(chr(i) for i in range(256))
for c in p:
if c in alnum_chars:
self.assertEqual(re.escape(c), c)
elif c == '\x00':
self.assertEqual(re.escape(c), '\\000')
else:
self.assertEqual(re.escape(c), '\\' + c)
self.assertMatch(re.escape(c), c)
self.assertMatch(re.escape(p), p)
def test_re_escape_byte(self):
alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii')
p = bytes(range(256))
for i in p:
b = bytes([i])
if b in alnum_chars:
self.assertEqual(re.escape(b), b)
elif i == 0:
self.assertEqual(re.escape(b), b'\\000')
else:
self.assertEqual(re.escape(b), b'\\' + b)
self.assertMatch(re.escape(b), b)
self.assertMatch(re.escape(p), p)
def test_re_escape_non_ascii(self):
s = 'xxx\u2620\u2620\u2620xxx'
s_escaped = re.escape(s)
self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx')
self.assertMatch(s_escaped, s)
self.assertMatch('.%s+.' % re.escape('\u2620'), s,
'x\u2620\u2620\u2620x', (2, 7), re.search)
def test_re_escape_non_ascii_bytes(self):
b = 'y\u2620y\u2620y'.encode('utf-8')
b_escaped = re.escape(b)
self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y')
self.assertMatch(b_escaped, b)
res = re.findall(re.escape('\u2620'.encode('utf-8')), b)
self.assertEqual(len(res), 2)
def pickle_test(self, pickle):
oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
s = pickle.dumps(oldpat)
newpat = pickle.loads(s)
self.assertEqual(oldpat, newpat)
def test_constants(self):
self.assertEqual(re.I, re.IGNORECASE)
self.assertEqual(re.L, re.LOCALE)
self.assertEqual(re.M, re.MULTILINE)
self.assertEqual(re.S, re.DOTALL)
self.assertEqual(re.X, re.VERBOSE)
def test_flags(self):
for flag in [re.I, re.M, re.X, re.S, re.L]:
self.assertNotEqual(re.compile('^pattern$', flag), None)
def test_sre_character_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"\%03o" % i, chr(i)))
self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8"))
self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z"))
if i < 0x10000:
self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i)))
self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"\0", "\000"))
self.assertIsNotNone(re.match(r"\08", "\0008"))
self.assertIsNotNone(re.match(r"\01", "\001"))
self.assertIsNotNone(re.match(r"\018", "\0018"))
self.assertIsNotNone(re.match(r"\567", chr(0o167)))
self.assertRaises(re.error, re.match, r"\911", "")
self.assertRaises(re.error, re.match, r"\x1", "")
self.assertRaises(re.error, re.match, r"\x1z", "")
self.assertRaises(re.error, re.match, r"\u123", "")
self.assertRaises(re.error, re.match, r"\u123z", "")
self.assertRaises(re.error, re.match, r"\U0001234", "")
self.assertRaises(re.error, re.match, r"\U0001234z", "")
self.assertRaises(re.error, re.match, r"\U00110000", "")
def test_sre_character_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
if i < 256:
self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i)))
if i < 0x10000:
self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i)))
self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0"))
self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z"))
self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e"))
self.assertRaises(re.error, re.match, r"[\911]", "")
self.assertRaises(re.error, re.match, r"[\x1z]", "")
self.assertRaises(re.error, re.match, r"[\u123z]", "")
self.assertRaises(re.error, re.match, r"[\U0001234z]", "")
self.assertRaises(re.error, re.match, r"[\U00110000]", "")
def test_sre_byte_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8"))
self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0"))
self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z"))
self.assertIsNotNone(re.match(br"\u", b'u'))
self.assertIsNotNone(re.match(br"\U", b'U'))
self.assertIsNotNone(re.match(br"\0", b"\000"))
self.assertIsNotNone(re.match(br"\08", b"\0008"))
self.assertIsNotNone(re.match(br"\01", b"\001"))
self.assertIsNotNone(re.match(br"\018", b"\0018"))
self.assertIsNotNone(re.match(br"\567", bytes([0o167])))
self.assertRaises(re.error, re.match, br"\911", b"")
self.assertRaises(re.error, re.match, br"\x1", b"")
self.assertRaises(re.error, re.match, br"\x1z", b"")
def test_sre_byte_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match(br"[\u]", b'u'))
self.assertIsNotNone(re.match(br"[\U]", b'U'))
self.assertRaises(re.error, re.match, br"[\911]", "")
self.assertRaises(re.error, re.match, br"[\x1z]", "")
def test_bug_113254(self):
self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
def test_bug_527371(self):
# bug described in patches 527371/672491
self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
self.assertEqual(re.match("((a))", "a").lastindex, 1)
def test_bug_545855(self):
# bug 545855 -- This pattern failed to cause a compile error as it
# should, instead provoking a TypeError.
self.assertRaises(re.error, re.compile, 'foo[a-')
def test_bug_418626(self):
# bugs 418626 at al. -- Testing Greg Chapman's addition of op code
# SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
# pattern '*?' on a long string.
self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
20003)
self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
# non-simple '*?' still used to hit the recursion limit, before the
# non-recursive scheme was implemented.
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
def test_bug_612074(self):
pat="["+re.escape("\u2039")+"]"
self.assertEqual(re.compile(pat) and 1, 1)
def test_stack_overflow(self):
# nasty cases that used to overflow the straightforward recursive
# implementation of repeated groups.
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
def test_unlimited_zero_width_repeat(self):
# Issue #9669
self.assertIsNone(re.match(r'(?:a?)*y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}y', 'z'))
self.assertIsNone(re.match(r'(?:a?)*?y', 'z'))
self.assertIsNone(re.match(r'(?:a?)+?y', 'z'))
self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z'))
# def test_scanner(self):
# def s_ident(scanner, token): return token
# def s_operator(scanner, token): return "op%s" % token
# def s_float(scanner, token): return float(token)
# def s_int(scanner, token): return int(token)
#
# scanner = Scanner([
# (r"[a-zA-Z_]\w*", s_ident),
# (r"\d+\.\d*", s_float),
# (r"\d+", s_int),
# (r"=|\+|-|\*|/", s_operator),
# (r"\s+", None),
# ])
#
# self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
#
# self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
# (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
# 'op+', 'bar'], ''))
def test_bug_448951(self):
# bug 448951 (similar to 429357, but with single char match)
# (Also test greedy matches.)
for op in '','?','*':
self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
(None, None))
self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
('a:', 'a'))
def test_bug_725106(self):
# capturing groups in alternatives in repeats
self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
('b', 'a'))
self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
('c', 'b'))
self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
('b', None))
self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
('b', None))
def test_bug_725149(self):
# mark_stack_base restoring before restoring marks
self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
('a', None))
self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
('a', None, None))
def test_bug_764548(self):
# bug 764548, re.compile() barfs on str/unicode subclasses
class my_unicode(str): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
def test_finditer(self):
iter = re.finditer(r":+", "a:b::c:::d")
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", 1, 10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=1, endpos=10)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", endpos=10, pos=1)
self.assertEqual([item.group(0) for item in iter],
[":", "::", ":::"])
pat = re.compile(r":+")
iter = pat.finditer("a:b::c:::d", pos=3, endpos=8)
self.assertEqual([item.group(0) for item in iter],
["::", "::"])
def test_bug_926075(self):
self.assertTrue(re.compile('bug_926075') is not
re.compile(b'bug_926075'))
def test_bug_931848(self):
pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
def test_bug_581080(self):
iter = re.finditer(r"\s", "a b")
self.assertEqual(next(iter).span(), (1,2))
self.assertRaises(StopIteration, next, iter)
scanner = re.compile(r"\s").scanner("a b")
self.assertEqual(scanner.search().span(), (1, 2))
self.assertEqual(scanner.search(), None)
def test_bug_817234(self):
iter = re.finditer(r".*", "asdf")
self.assertEqual(next(iter).span(), (0, 4))
self.assertEqual(next(iter).span(), (4, 4))
self.assertRaises(StopIteration, next, iter)
def test_bug_6561(self):
# '\d' should match characters in Unicode category 'Nd'
# (Number, Decimal Digit), but not those in 'Nl' (Number,
# Letter) or 'No' (Number, Other).
decimal_digits = [
'\u0037', # '\N{DIGIT SEVEN}', category 'Nd'
'\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd'
'\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
]
for x in decimal_digits:
self.assertEqual(re.match('^\d$', x).group(0), x)
not_decimal_digits = [
'\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
'\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl'
'\u2082', # '\N{SUBSCRIPT TWO}', category 'No'
'\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
]
for x in not_decimal_digits:
self.assertIsNone(re.match('^\d$', x))
def test_empty_array(self):
# SF buf 1647541
import array
for typecode in 'bBuhHiIlLfd':
a = array.array(typecode)
self.assertEqual(re.compile(b"bla").match(a), None)
self.assertEqual(re.compile(b"").match(a).groups(), ())
def test_inline_flags(self):
# Bug #1700
upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Bellow
lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Bellow
p = re.compile(upper_char, re.I | re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile(lower_char, re.I | re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + upper_char, re.U)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?i)' + lower_char, re.U)
q = p.match(upper_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + upper_char)
q = p.match(lower_char)
self.assertNotEqual(q, None)
p = re.compile('(?iu)' + lower_char)
q = p.match(upper_char)
self.assertNotEqual(q, None)
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
def test_bytes_str_mixing(self):
# Mixing str and bytes is disallowed
pat = re.compile('.')
bpat = re.compile(b'.')
self.assertRaises(TypeError, pat.match, b'b')
self.assertRaises(TypeError, bpat.match, 'b')
self.assertRaises(TypeError, pat.sub, b'b', 'c')
self.assertRaises(TypeError, pat.sub, 'b', b'c')
self.assertRaises(TypeError, pat.sub, b'b', b'c')
self.assertRaises(TypeError, bpat.sub, b'b', 'c')
self.assertRaises(TypeError, bpat.sub, 'b', b'c')
self.assertRaises(TypeError, bpat.sub, 'b', 'c')
def test_ascii_and_unicode_flag(self):
# String patterns
for flags in (0, re.UNICODE):
pat = re.compile('\xc0', flags | re.IGNORECASE)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\w', flags)
self.assertNotEqual(pat.match('\xe0'), None)
pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\xc0', re.IGNORECASE)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('\w', re.ASCII)
self.assertEqual(pat.match('\xe0'), None)
pat = re.compile('(?a)\w')
self.assertEqual(pat.match('\xe0'), None)
# Bytes patterns
for flags in (0, re.ASCII):
pat = re.compile(b'\xc0', re.IGNORECASE)
self.assertEqual(pat.match(b'\xe0'), None)
pat = re.compile(b'\w')
self.assertEqual(pat.match(b'\xe0'), None)
# Incompatibilities
self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, b'(?u)\w')
self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
self.assertRaises(ValueError, re.compile, '(?au)\w')
def test_bug_6509(self):
# Replacement strings of both types must parse properly.
# all strings
pat = re.compile('a(\w)')
self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
pat = re.compile('a(.)')
self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
pat = re.compile('..')
self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
# all bytes
pat = re.compile(b'a(\w)')
self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
pat = re.compile(b'a(.)')
self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
pat = re.compile(b'..')
self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes')
def test_dealloc(self):
# issue 3299: check for segfault in debug build
import _sre
# the overflow limit is different on wide and narrow builds and it
# depends on the definition of SRE_CODE (see sre.h).
# 2**128 should be big enough to overflow on both. For smaller values
# a RuntimeError is raised instead of OverflowError.
long_overflow = 2**128
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
self.assertRaises(TypeError, _sre.compile, {}, 0, [])
def test_search_dot_unicode(self):
self.assertIsNotNone(re.search("123.*-", '123abc-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9-'))
self.assertIsNotNone(re.search("123.*-", '123\u20ac-'))
self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-'))
self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-'))
def test_compile(self):
# Test return value when given string and pattern as parameter
pattern = re.compile('random pattern')
self.assertIsInstance(pattern, re._pattern_type)
same_pattern = re.compile(pattern)
self.assertIsInstance(same_pattern, re._pattern_type)
self.assertIs(same_pattern, pattern)
# Test behaviour when not given a string or pattern as parameter
self.assertRaises(TypeError, re.compile, 0)
def test_bug_13899(self):
# Issue #13899: re pattern r"[\A]" should work like "A" but matches
# nothing. Ditto B and Z.
self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
['A', 'B', '\b', 'C', 'Z'])
# FIXME: brython: implement test.support
# @bigmemtest(size=_2G, memuse=1)
# def test_large_search(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# m = re.search('$', s)
# self.assertIsNotNone(m)
# self.assertEqual(m.start(), size)
# self.assertEqual(m.end(), size)
# FIXME: brython: implement test.support
# The huge memuse is because of re.sub() using a list and a join()
# to create the replacement result.
# @bigmemtest(size=_2G, memuse=16 + 2)
# def test_large_subn(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# r, n = re.subn('', '', s)
# self.assertEqual(r, s)
# self.assertEqual(n, size + 1)
def test_bug_16688(self):
# Issue 16688: Backreferences make case-insensitive regex fail on
# non-ASCII strings.
self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a'])
self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2))
def test_repeat_minmax_overflow(self):
# Issue #13169
string = "x" * 100000
self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536))
# 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t.
self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128))
# FIXME: brython: implement test.support
# @cpython_only
# def test_repeat_minmax_overflow_maxrepeat(self):
# try:
# from _sre import MAXREPEAT
# except ImportError:
# self.skipTest('requires _sre.MAXREPEAT constant')
# string = "x" * 100000
# self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string))
# self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(),
# (0, 100000))
# self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string))
# self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT)
def test_backref_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '<foo>'):
re.compile('(?P=<foo>)')
def test_group_name_in_exception(self):
# Issue 17341: Poor error message when compiling invalid regex
with self.assertRaisesRegex(sre_constants.error, '\?foo'):
re.compile('(?P<?foo>)')
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
print('Running re_tests test suite')
else:
# To save time, only run the first and last 10 tests
#tests = tests[:10] + tests[-10:]
pass
for t in tests:
sys.stdout.flush()
pattern = s = outcome = repl = expected = None
if len(t) == 5:
pattern, s, outcome, repl, expected = t
elif len(t) == 3:
pattern, s, outcome = t
else:
raise ValueError('Test tuples should have 3 or 5 fields', t)
try:
obj = re.compile(pattern)
except re.error:
if outcome == SYNTAX_ERROR: pass # Expected a syntax error
else:
print('=== Syntax error:', t)
except KeyboardInterrupt: raise KeyboardInterrupt
except:
print('*** Unexpected error ***', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
try:
result = obj.search(s)
except re.error as msg:
print('=== Unexpected exception', t, repr(msg))
if outcome == SYNTAX_ERROR:
# This should have been a syntax error; forget it.
pass
elif outcome == FAIL:
if result is None: pass # No match, as expected
else: print('=== Succeeded incorrectly', t)
elif outcome == SUCCEED:
if result is not None:
# Matched, as expected, so now we compute the
# result string and compare it to our expected result.
start, end = result.span(0)
vardict={'found': result.group(0),
'groups': result.group(),
'flags': result.re.flags}
for i in range(1, 100):
try:
gi = result.group(i)
# Special hack because else the string concat fails:
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict['g%d' % i] = gi
for i in result.re.groupindex.keys():
try:
gi = result.group(i)
if gi is None:
gi = "None"
except IndexError:
gi = "Error"
vardict[i] = gi
repl = eval(repl, vardict)
if repl != expected:
print('=== grouping error', t, end=' ')
print(repr(repl) + ' should be ' + repr(expected))
else:
print('=== Failed incorrectly', t)
# Try the match with both pattern and string converted to
# bytes, and check that it still succeeds.
try:
bpat = bytes(pattern, "ascii")
bs = bytes(s, "ascii")
except UnicodeEncodeError:
# skip non-ascii tests
pass
else:
try:
bpat = re.compile(bpat)
except Exception:
print('=== Fails on bytes pattern compile', t)
if verbose:
traceback.print_exc(file=sys.stdout)
else:
bytes_result = bpat.search(bs)
if bytes_result is None:
print('=== Fails on bytes pattern match', t)
# Try the match with the search area limited to the extent
# of the match and see if it still succeeds. \B will
# break (because it won't match at the end or start of a
# string), so we'll ignore patterns that feature it.
if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
and result is not None:
obj = re.compile(pattern)
result = obj.search(s, result.start(0), result.end(0) + 1)
if result is None:
print('=== Failed on range-limited match', t)
# Try the match with IGNORECASE enabled, and check that it
# still succeeds.
obj = re.compile(pattern, re.IGNORECASE)
result = obj.search(s)
if result is None:
print('=== Fails on case-insensitive match', t)
# Try the match with LOCALE enabled, and check that it
# still succeeds.
if '(?u)' not in pattern:
obj = re.compile(pattern, re.LOCALE)
result = obj.search(s)
if result is None:
print('=== Fails on locale-sensitive match', t)
# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj = re.compile(pattern, re.UNICODE)
result = obj.search(s)
if result is None:
print('=== Fails on unicode-sensitive match', t)
def test_main():
# FIXME: brython: implement test.support
# run_unittest(ReTests)
run_re_tests()
if __name__ == "__main__":
test_main()
| agpl-3.0 |
jeremiahmarks/dangerzone | scripts/python/turtleRelated/newhex.py | 1 | 4150 | from fvh import MyTurtle
masterHexSet=[]
masterHexList=[]
distanceBetweenLayers=None
points=["sw","se","e","ne","nw","w"]
def drawit(circleXfromCenter,rootHex):
heading=90
rootHex.turt.pu()
rootHex.turt.seth(heading)
rootHex.setDistanceBetweenLayers()
rootHex.turt.fd(distanceBetweenLayers)
newturt=Hex(rootHex.turt.pos(), rootHex.length)
newturt.drawBlack()
heading=heading-60
for x in range(circleXfromCenter-1):
rootHex.turt.seth(heading)
rootHex.turt.fd(distanceBetweenLayers)
newturt=Hex(rootHex.turt.pos(), rootHex.length)
newturt.drawBlack()
for x in range(5):
heading=heading-60
for y in range(circleXfromCenter):
rootHex.turt.seth(heading)
rootHex.turt.fd(distanceBetweenLayers)
newturt=Hex(rootHex.turt.pos(), rootHex.length)
newturt.drawBlack()
def drawsome(x):
a=Hex((0,0),10)
a.drawBlack()
for circle in range(1,x):
drawit(circle,a)
counter=1
for ahex in masterHexList:
ahex.write(counter)
counter+=1
def checkInMasterHexSet(point):
tempx=round(point[0],-1)
tempy=round(point[1],-1)
temppos=(tempx,tempy)
return temppos in masterHexSet
class Hex(object):
def __init__(self, center, lengthperside):
global masterHexSet
global masterHexList
self.center=center
self.length=lengthperside
tempx=round(center[0],-1)
tempy=round(center[1],-1)
temppos=(tempx,tempy)
masterHexSet.append(temppos)
masterHexList.append(self)
self.turt=MyTurtle()
self.turt.setup()
self.turt.tracer(False)
self.points={}
def drawBlack(self):
self.turt.pu()
self.turt.pencolor('black')
self.turt.goto(self.center)
self.turt.seth(240)
self.turt.fd(self.length)
self.turt.seth(0)
self.turt.pd()
for eachpoint in points:
self.points[eachpoint]=self.turt.pos()
self.turt.fd(self.length)
self.turt.lt(60)
self.turt.pu()
self.turt.goto(self.center)
def drawRed(self):
self.turt.pu()
self.turt.pencolor('red')
self.turt.goto(self.center)
self.turt.seth(240)
self.turt.fd(self.length)
self.turt.seth(0)
self.turt.pd()
for eachpoint in points:
self.points[eachpoint]=self.turt.pos()
self.turt.fd(self.length)
self.turt.lt(60)
self.turt.pu()
self.turt.goto(self.center)
def setDistanceBetweenLayers(self):
global distanceBetweenLayers
distanceBetweenLayers=self.points['ne'][1]-self.points['se'][1]
def createLayers(self,numberOfLayers):
if not distanceBetweenLayers:
self.setDistanceBetweenLayers()
for x in range(1,numberOfLayers+1):
for y in range(6):
self.turt.pu()
self.turt.goto(self.center)
self.turt.seth(90-(y*60))
self.turt.fd(x*distanceBetweenLayers)
if not checkInMasterHexSet(self.turt.pos()):
tempHex=Hex(self.turt.pos(), self.length)
tempHex.drawBlack()
if (numberOfLayers-x>=1):
tempHex.createLayers(numberOfLayers-x)
def colorBlack(self):
self.turt.fillcolor('black')
self.turt.fill(True)
self.drawBlack()
self.turt.fill(False)
if self.something:
self.turt.pencolor('white')
self.turt.write(self.something)
self.turt.pencolor('black')
def colorWhite(self):
self.turt.fillcolor('white')
self.turt.fill(True)
self.drawBlack()
self.turt.fill(False)
if self.something:
self.turt.pencolor('black')
self.turt.write(self.something)
def write(self,something):
self.something=something
self.turt.goto(self.center)
self.turt.write(something)
| mit |
juga0/dhcpcanon | tests/dhcpcap_leases.py | 3 | 1968 | # -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
# Copyright 2016, 2017 juga (juga at riseup dot net), MIT license.
"""."""
from dhcpcanon.dhcpcaplease import DHCPCAPLease
LEASE_INIT = DHCPCAPLease(interface='enp0s25', address='', server_id='',
next_server='', router='', subnet_mask='',
broadcast_address='', domain='', name_server='',
lease_time='', renewal_time='', rebinding_time='',
subnet_mask_cidr='', subnet='', expiry='', renew='',
rebind='')
LEASE_REQUEST = DHCPCAPLease(interface='eth0', address='192.168.1.23',
server_id='192.168.1.1',
next_server='192.168.1.1',
router='192.168.1.1', subnet_mask='255.255.255.0',
broadcast_address='192.168.1.255',
domain='localdomain',
name_server='192.168.1.1 8.8.8.8',
lease_time='43200',
renewal_time='21600', rebinding_time='37800',
subnet_mask_cidr='24', subnet='192.168.1.0',
expiry='', renew='', rebind='')
LEASE_ACK = DHCPCAPLease(interface='eth0', address='192.168.1.23',
server_id='192.168.1.1', next_server='192.168.1.1',
router='192.168.1.1', subnet_mask='255.255.255.0',
broadcast_address='192.168.1.255',
domain='localdomain',
name_server='192.168.1.1 8.8.8.8', lease_time='43200',
renewal_time='21600', rebinding_time='37800',
subnet_mask_cidr='24', subnet='192.168.1.0',
expiry='17-06-23 12:00:00', renew='17-06-23 06:00:00',
rebind='17-06-23 10:30:00')
| mit |
atheed/servo | tests/wpt/css-tests/tools/py/testing/code/test_assertion.py | 160 | 7843 | import pytest, py
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_within_finally():
excinfo = py.test.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in e.msg
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert e.msg.find('WeirdRepr') != -1
assert e.msg.find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert e.msg.find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert e.msg.find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
py.test.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in e.msg
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in e.msg
class TestView:
def setup_class(cls):
cls.View = py.test.importorskip("py._code._assertionold").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
def test_underscore_api():
py.code._AssertionError
py.code._reinterpret_old # used by pypy
py.code._reinterpret
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_assert_customizable_reprcompare(monkeypatch):
util = pytest.importorskip("_pytest.assertion.util")
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@pytest.mark.skipif("sys.version_info < (2,5)")
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError:
s = str(exvalue())
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
| mpl-2.0 |
srikantbmandal/ansible | lib/ansible/modules/cloud/openstack/os_ironic_inspect.py | 22 | 6024 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_ironic_inspect
short_description: Explicitly triggers baremetal node introspection in ironic.
extends_documentation_fragment: openstack
author: "Julia Kreger (@juliakreger)"
version_added: "2.1"
description:
- Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
This command may be out of band or in-band depending on the ironic driver configuration.
This is only possible on nodes in 'manageable' and 'available' state.
options:
mac:
description:
- unique mac address that is used to attempt to identify the host.
required: false
default: None
uuid:
description:
- globally unique identifier (UUID) to identify the host.
required: false
default: None
name:
description:
- unique name identifier to identify the host in Ironic.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
Use with "auth" and "auth_type" settings set to None.
required: false
default: None
timeout:
description:
- A timeout in seconds to tell the role to wait for the node to complete introspection if wait is set to True.
required: false
default: 1200
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements: ["shade"]
'''
RETURN = '''
ansible_facts:
description: Dictionary of new facts representing discovered properties of the node..
returned: changed
type: complex
contains:
memory_mb:
description: Amount of node memory as updated in the node properties
type: string
sample: "1024"
cpu_arch:
description: Detected CPU architecture type
type: string
sample: "x86_64"
local_gb:
description: Total size of local disk storage as updaed in node properties.
type: string
sample: "10"
cpus:
description: Count of cpu cores defined in the updated node properties.
type: string
sample: "1"
'''
EXAMPLES = '''
# Invoke node inspection
- os_ironic_inspect:
name: "testnode1"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
def main():
argument_spec = openstack_full_argument_spec(
auth_type=dict(required=False),
uuid=dict(required=False),
name=dict(required=False),
mac=dict(required=False),
ironic_url=dict(required=False),
timeout=dict(default=1200, type='int', required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.0.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.0.0")
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears to be disabled, "
"Please define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
try:
cloud = shade.operator_cloud(**module.params)
if module.params['name'] or module.params['uuid']:
server = cloud.get_machine(_choose_id_value(module))
elif module.params['mac']:
server = cloud.get_machine_by_mac(module.params['mac'])
else:
module.fail_json(msg="The worlds did not align, "
"the host was not found as "
"no name, uuid, or mac was "
"defined.")
if server:
cloud.inspect_machine(server['uuid'], module.params['wait'])
# TODO(TheJulia): diff properties, ?and ports? and determine
# if a change occurred. In theory, the node is always changed
# if introspection is able to update the record.
module.exit_json(changed=True,
ansible_facts=server['properties'])
else:
module.fail_json(msg="node not found.")
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
| gpl-3.0 |
jmcbailey/django-cached-hitcount | cached_hitcount/south_migrations/0002_auto__chg_field_hit_added.py | 2 | 2148 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Hit.added'
db.alter_column(u'cached_hitcount_hit', 'added', self.gf('django.db.models.fields.DateField')())
def backwards(self, orm):
# Changing field 'Hit.added'
db.alter_column(u'cached_hitcount_hit', 'added', self.gf('django.db.models.fields.DateTimeField')())
models = {
u'cached_hitcount.blacklistip': {
'Meta': {'object_name': 'BlacklistIP'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'cached_hitcount.hit': {
'Meta': {'ordering': "('-hits',)", 'object_name': 'Hit'},
'added': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 2, 28, 0, 0)'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_hit'", 'to': u"orm['contenttypes.ContentType']"}),
'hits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cached_hitcount'] | gpl-3.0 |
Darkdadaah/pywikibot-core | tests/wikidataquery_tests.py | 1 | 9750 | # -*- coding: utf-8 -*-
"""Test cases for the WikidataQuery query syntax and API."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import os
import time
import pywikibot
import pywikibot.data.wikidataquery as query
from pywikibot.page import ItemPage, PropertyPage, Claim
from tests.aspects import unittest, WikidataTestCase, TestCase
class TestDryApiFunctions(TestCase):
"""Test WikiDataQuery API functions."""
net = False
def testQueries(self):
"""
Test Queries and check whether they're behaving correctly.
Check that we produce the expected query strings and that
invalid inputs are rejected correctly
"""
q = query.HasClaim(99)
self.assertEqual(str(q), "claim[99]")
q = query.HasClaim(99, 100)
self.assertEqual(str(q), "claim[99:100]")
q = query.HasClaim(99, [100])
self.assertEqual(str(q), "claim[99:100]")
q = query.HasClaim(99, [100, 101])
self.assertEqual(str(q), "claim[99:100,101]")
q = query.NoClaim(99, [100, 101])
self.assertEqual(str(q), "noclaim[99:100,101]")
q = query.StringClaim(99, "Hello")
self.assertEqual(str(q), 'string[99:"Hello"]')
q = query.StringClaim(99, ["Hello"])
self.assertEqual(str(q), 'string[99:"Hello"]')
q = query.StringClaim(99, ["Hello", "world"])
self.assertEqual(str(q), 'string[99:"Hello","world"]')
self.assertRaises(TypeError, lambda: query.StringClaim(99, 2))
q = query.Tree(92, [1], 2)
self.assertEqual(str(q), 'tree[92][1][2]')
# missing third arg
q = query.Tree(92, 1)
self.assertEqual(str(q), 'tree[92][1][]')
# missing second arg
q = query.Tree(92, reverse=3)
self.assertEqual(str(q), 'tree[92][][3]')
q = query.Tree([92, 93], 1, [2, 7])
self.assertEqual(str(q), 'tree[92,93][1][2,7]')
# bad tree arg types
self.assertRaises(TypeError, lambda: query.Tree(99, "hello"))
q = query.Link("enwiki")
self.assertEqual(str(q), 'link[enwiki]')
q = query.NoLink(["enwiki", "frwiki"])
self.assertEqual(str(q), 'nolink[enwiki,frwiki]')
# bad link arg types
self.assertRaises(TypeError, lambda: query.Link(99))
self.assertRaises(TypeError, lambda: query.Link([99]))
# HasClaim with tree as arg
q = query.HasClaim(99, query.Tree(1, 2, 3))
self.assertEqual(str(q), "claim[99:(tree[1][2][3])]")
q = query.HasClaim(99, query.Tree(1, [2, 5], [3, 90]))
self.assertEqual(str(q), "claim[99:(tree[1][2,5][3,90])]")
class TestLiveApiFunctions(WikidataTestCase):
"""Test WikiDataQuery API functions."""
cached = True
def testQueriesWDStructures(self):
"""Test queries using Wikibase page structures like ItemPage."""
q = query.HasClaim(PropertyPage(self.repo, "P99"))
self.assertEqual(str(q), "claim[99]")
q = query.HasClaim(PropertyPage(self.repo, "P99"),
ItemPage(self.repo, "Q100"))
self.assertEqual(str(q), "claim[99:100]")
q = query.HasClaim(99, [100, PropertyPage(self.repo, "P101")])
self.assertEqual(str(q), "claim[99:100,101]")
q = query.StringClaim(PropertyPage(self.repo, "P99"), "Hello")
self.assertEqual(str(q), 'string[99:"Hello"]')
q = query.Tree(ItemPage(self.repo, "Q92"), [1], 2)
self.assertEqual(str(q), 'tree[92][1][2]')
q = query.Tree(ItemPage(self.repo, "Q92"), [PropertyPage(self.repo, "P101")], 2)
self.assertEqual(str(q), 'tree[92][101][2]')
self.assertRaises(TypeError, lambda: query.Tree(PropertyPage(self.repo, "P92"),
[PropertyPage(self.repo, "P101")],
2))
c = pywikibot.Coordinate(50, 60)
q = query.Around(PropertyPage(self.repo, "P625"), c, 23.4)
self.assertEqual(str(q), 'around[625,50,60,23.4]')
begin = pywikibot.WbTime(site=self.repo, year=1999)
end = pywikibot.WbTime(site=self.repo, year=2010, hour=1)
# note no second comma
q = query.Between(PropertyPage(self.repo, "P569"), begin)
self.assertEqual(str(q), 'between[569,+00000001999-01-01T00:00:00Z]')
q = query.Between(PropertyPage(self.repo, "P569"), end=end)
self.assertEqual(str(q), 'between[569,,+00000002010-01-01T01:00:00Z]')
q = query.Between(569, begin, end)
self.assertEqual(str(q),
'between[569,+00000001999-01-01T00:00:00Z,+00000002010-01-01T01:00:00Z]')
# try negative year
begin = pywikibot.WbTime(site=self.repo, year=-44)
q = query.Between(569, begin, end)
self.assertEqual(str(q),
'between[569,-00000000044-01-01T00:00:00Z,+00000002010-01-01T01:00:00Z]')
def testQueriesDirectFromClaim(self):
"""Test construction of the right Query from a page.Claim."""
# Datatype: item
claim = Claim(self.repo, 'P17')
claim.setTarget(pywikibot.ItemPage(self.repo, 'Q35'))
q = query.fromClaim(claim)
self.assertEqual(str(q), 'claim[17:35]')
# Datatype: string
claim = Claim(self.repo, 'P225')
claim.setTarget('somestring')
q = query.fromClaim(claim)
self.assertEqual(str(q), 'string[225:"somestring"]')
# Datatype: external-id
claim = Claim(self.repo, 'P268')
claim.setTarget('somestring')
q = query.fromClaim(claim)
self.assertEqual(str(q), 'string[268:"somestring"]')
# Datatype: commonsMedia
claim = Claim(self.repo, 'P18')
claim.setTarget(
pywikibot.FilePage(
pywikibot.Site(self.family, self.code),
'Foo.jpg'))
q = query.fromClaim(claim)
self.assertEqual(str(q), 'string[18:"Foo.jpg"]')
def testQuerySets(self):
"""Test that we can join queries together correctly."""
# construct via queries
qs = query.HasClaim(99, 100).AND(query.HasClaim(99, 101))
self.assertEqual(str(qs), 'claim[99:100] AND claim[99:101]')
self.assertEqual(repr(qs), 'QuerySet(claim[99:100] AND claim[99:101])')
qs = query.HasClaim(99, 100).AND(query.HasClaim(99, 101)).AND(query.HasClaim(95))
self.assertEqual(str(qs), 'claim[99:100] AND claim[99:101] AND claim[95]')
# construct via queries
qs = query.HasClaim(99, 100).AND([query.HasClaim(99, 101), query.HasClaim(95)])
self.assertEqual(str(qs), 'claim[99:100] AND claim[99:101] AND claim[95]')
qs = query.HasClaim(99, 100).OR([query.HasClaim(99, 101), query.HasClaim(95)])
self.assertEqual(str(qs), 'claim[99:100] OR claim[99:101] OR claim[95]')
q1 = query.HasClaim(99, 100)
q2 = query.HasClaim(99, 101)
# different joiners get explicit grouping parens (the api also allows
# implicit, but we don't do that)
qs1 = q1.AND(q2)
qs2 = q1.OR(qs1).AND(query.HasClaim(98))
self.assertEqual(str(qs2),
'(claim[99:100] OR (claim[99:100] AND claim[99:101])) AND claim[98]')
# if the joiners are the same, no need to group
qs1 = q1.AND(q2)
qs2 = q1.AND(qs1).AND(query.HasClaim(98))
self.assertEqual(str(qs2),
'claim[99:100] AND claim[99:100] AND claim[99:101] AND claim[98]')
qs1 = query.HasClaim(100).AND(query.HasClaim(101))
qs2 = qs1.OR(query.HasClaim(102))
self.assertEqual(str(qs2), '(claim[100] AND claim[101]) OR claim[102]')
qs = query.Link("enwiki").AND(query.NoLink("dewiki"))
self.assertEqual(str(qs), 'link[enwiki] AND nolink[dewiki]')
def testQueryApiSyntax(self):
"""Test that we can generate the API query correctly."""
w = query.WikidataQuery("http://example.com")
qs = w.getQueryString(query.Link("enwiki"))
self.assertEqual(qs, "q=link%5Benwiki%5D")
self.assertEqual(w.getUrl(qs), "http://example.com/api?q=link%5Benwiki%5D")
# check labels and props work OK
qs = w.getQueryString(query.Link("enwiki"), ['en', 'fr'], ['prop'])
self.assertEqual(qs, "q=link%5Benwiki%5D&labels=en,fr&props=prop")
class TestApiSlowFunctions(TestCase):
"""Test slow WikiDataQuery API functions."""
hostname = 'https://wdq.wmflabs.org/api'
def testQueryApiGetter(self):
"""Test that we can actually retreive data and that caching works."""
w = query.WikidataQuery(cacheMaxAge=0)
# this query doesn't return any items, save a bit of bandwidth!
q = query.HasClaim(105).AND([query.NoClaim(225), query.HasClaim(100)])
# check that the cache file is created
cacheFile = w.getCacheFilename(w.getQueryString(q, [], []))
# remove existing cache file
try:
os.remove(cacheFile)
except OSError:
pass
data = w.query(q)
self.assertFalse(os.path.exists(cacheFile))
w = query.WikidataQuery(cacheMaxAge=0.1)
data = w.query(q)
self.assertTrue(os.path.exists(cacheFile))
self.assertIn('status', data)
self.assertIn('items', data)
t1 = time.time()
data = w.query(q)
t2 = time.time()
# check that the cache access is fast
self.assertLess(t2 - t1, 0.2)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| mit |
theflofly/tensorflow | tensorflow/python/ops/list_ops.py | 15 | 12749 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
# list_ops -> control_flow_ops -> tensor_array_ops -> list_ops
control_flow_ops = LazyLoader(
"control_flow_ops", globals(),
"tensorflow.python.ops.control_flow_ops")
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def empty_tensor_list(element_shape,
element_dtype,
max_num_elements=None,
name=None):
if max_num_elements is None:
max_num_elements = -1
return gen_list_ops.empty_tensor_list(
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
max_num_elements=max_num_elements,
name=name)
def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):
return gen_list_ops.tensor_list_reserve(
element_shape=_build_element_shape(element_shape),
num_elements=num_elements,
element_dtype=element_dtype,
name=name)
def tensor_list_from_tensor(tensor, element_shape, name=None):
return gen_list_ops.tensor_list_from_tensor(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
name=name)
def tensor_list_get_item(input_handle, index, element_dtype, element_shape=None,
name=None):
return gen_list_ops.tensor_list_get_item(
input_handle=input_handle,
index=index,
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
name=name)
def tensor_list_pop_back(input_handle, element_dtype, name=None):
return gen_list_ops.tensor_list_pop_back(
input_handle=input_handle,
element_shape=-1,
element_dtype=element_dtype,
name=name)
def tensor_list_gather(input_handle,
indices,
element_dtype,
element_shape=None,
name=None):
return gen_list_ops.tensor_list_gather(
input_handle=input_handle,
indices=indices,
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
name=name)
def tensor_list_scatter(tensor,
indices,
element_shape=None,
input_handle=None,
name=None):
if input_handle is not None:
return gen_list_ops.tensor_list_scatter_into_existing_list(
input_handle=input_handle, tensor=tensor, indices=indices, name=name)
else:
return gen_list_ops.tensor_list_scatter_v2(
tensor=tensor,
indices=indices,
element_shape=_build_element_shape(element_shape),
num_elements=-1,
name=name)
def tensor_list_stack(input_handle,
element_dtype,
num_elements=-1,
element_shape=None,
name=None):
return gen_list_ops.tensor_list_stack(
input_handle=input_handle,
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
num_elements=num_elements,
name=name)
def tensor_list_concat(input_handle, element_dtype, element_shape=None,
name=None):
# Ignore the lengths output of TensorListConcat. It is only used during
# gradient computation.
return gen_list_ops.tensor_list_concat_v2(
input_handle=input_handle,
element_dtype=element_dtype,
element_shape=_build_element_shape(element_shape),
leading_dims=ops.convert_to_tensor([], dtype=dtypes.int64),
name=name)[0]
def tensor_list_split(tensor, element_shape, lengths, name=None):
return gen_list_ops.tensor_list_split(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
lengths=lengths,
name=name)
def tensor_list_set_item(input_handle,
index,
item,
resize_if_index_out_of_bounds=False,
name=None):
"""Sets `item` at `index` in input list."""
if resize_if_index_out_of_bounds:
input_list_size = gen_list_ops.tensor_list_length(input_handle)
# TODO(srbs): This could cause some slowdown. Consider fusing resize
# functionality in the SetItem op.
input_handle = control_flow_ops.cond(
index >= input_list_size,
lambda: gen_list_ops.tensor_list_resize( # pylint: disable=g-long-lambda
input_handle, index + 1),
lambda: input_handle)
return gen_list_ops.tensor_list_set_item(
input_handle=input_handle, index=index, item=item, name=name)
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
return gen_list_ops.tensor_list_pop_back(
dresult,
element_shape=array_ops.shape(op.inputs[1]),
element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
if dlist is None:
dlist = empty_tensor_list(
element_dtype=delement.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
return gen_list_ops.tensor_list_push_back(dlist, delement), None
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:]), None
@ops.RegisterGradient("TensorListConcat")
@ops.RegisterGradient("TensorListConcatV2")
def _TensorListConcatGrad(op, dtensor, unused_dlengths):
"""Gradient function for TensorListConcat."""
dlist = tensor_list_split(
dtensor,
element_shape=gen_list_ops.tensor_list_element_shape(
op.inputs[0], shape_type=dtypes.int32),
lengths=op.outputs[1])
if op.type == "TensorListConcatV2":
return dlist, None, None
else:
return dlist
@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
tensor, _, lengths = op.inputs
element_shape = array_ops.slice(array_ops.shape(tensor), [1], [-1])
element_shape = array_ops.concat([[-1], element_shape], axis=0)
return gen_list_ops.tensor_list_concat_v2(
dlist,
element_shape=element_shape,
leading_dims=lengths,
element_dtype=op.inputs[0].dtype)[0], None, None
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
"""Gradient for TensorListFromTensor."""
t = op.inputs[0]
if t.shape.dims and t.shape.dims[0].value is not None:
num_elements = t.shape.dims[0].value
else:
num_elements = None
if dlist is None:
dlist = empty_tensor_list(
element_dtype=t.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
tensor_grad = gen_list_ops.tensor_list_stack(
dlist,
element_shape=array_ops.slice(array_ops.shape(t), [1], [-1]),
element_dtype=t.dtype,
num_elements=num_elements)
shape_grad = None
return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
"""Gradient for TensorListGetItem."""
list_size = gen_list_ops.tensor_list_length(op.inputs[0])
list_grad = gen_list_ops.tensor_list_set_item(
gen_list_ops.tensor_list_reserve(
gen_list_ops.tensor_list_element_shape(op.inputs[0],
shape_type=dtypes.int32),
list_size, element_dtype=ditem.dtype),
index=op.inputs[1],
item=ditem)
index_grad = None
element_shape_grad = None
return list_grad, index_grad, element_shape_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
"""Gradient function for TensorListSetItem."""
_, index, item = op.inputs
list_grad = gen_list_ops.tensor_list_set_item(
dlist, index=index, item=array_ops.zeros_like(item))
index_grad = None
element_grad = tensor_list_get_item(
dlist,
index,
element_shape=array_ops.shape(item),
element_dtype=item.dtype)
return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListResize")
def _TensorListResizeGrad(op, dlist):
input_list, _ = op.inputs
input_list_size = gen_list_ops.tensor_list_length(input_list)
return gen_list_ops.tensor_list_resize(dlist, input_list_size), None
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
"""Gradient function for TensorListGather."""
input_list, indices, _ = op.inputs
element_shape = gen_list_ops.tensor_list_element_shape(
input_list, shape_type=dtypes.int32)
num_elements = gen_list_ops.tensor_list_length(input_list)
dlist = tensor_list_reserve(element_shape, num_elements, dtensor.dtype)
dlist = tensor_list_scatter(
tensor=dtensor, indices=indices, input_handle=dlist)
return dlist, None, None
@ops.RegisterGradient("TensorListScatter")
@ops.RegisterGradient("TensorListScatterV2")
def _TensorListScatterGrad(op, dlist):
"""Gradient function for TensorListScatter."""
tensor = op.inputs[0]
indices = op.inputs[1]
dtensor = gen_list_ops.tensor_list_gather(
dlist,
indices,
element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]),
element_dtype=tensor.dtype)
if op.type == "TensorListScatterV2":
return dtensor, None, None, None
else:
return dtensor, None, None
@ops.RegisterGradient("TensorListScatterIntoExistingList")
def _TensorListScatterIntoExistingListGrad(op, dlist):
"""Gradient function for TensorListScatterIntoExistingList."""
_, tensor, indices = op.inputs
dtensor = gen_list_ops.tensor_list_gather(
dlist,
indices,
element_shape=array_ops.slice(array_ops.shape(tensor), [1], [-1]),
element_dtype=tensor.dtype)
zeros = array_ops.zeros_like(tensor)
dlist = tensor_list_scatter(zeros, indices, indices, input_handle=dlist)
return dlist, dtensor, None
def _build_element_shape(shape):
"""Converts shape to a format understood by list_ops for element_shape.
If `shape` is already a `Tensor` it is returned as-is. We do not perform a
type check here.
If shape is None or a TensorShape with unknown rank, -1 is returned.
If shape is a scalar, an int32 tensor with empty list is returned. Note we
do directly return an empty list since ops.convert_to_tensor would conver it
to a float32 which is not a valid type for element_shape.
If shape is a sequence of dims, None's in the list are replaced with -1. We
do not check the dtype of the other dims.
Args:
shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
be a None, scalar or Tensor).
Returns:
A None-free shape that can be converted to a tensor.
"""
if isinstance(shape, ops.Tensor):
return shape
if isinstance(shape, tensor_shape.TensorShape):
# `TensorShape.as_list` requires rank to be known.
shape = shape.as_list() if shape else None
# Shape is unknown.
if shape is None:
return -1
# Shape is a scalar.
if not shape:
return ops.convert_to_tensor(shape, dtype=dtypes.int32)
# Shape is a sequence of dimensions. Convert None dims to -1.
def convert(val):
if val is None:
return -1
if isinstance(val, ops.Tensor):
return val
if isinstance(val, tensor_shape.Dimension):
return val.value if val.value is not None else -1
return val
return [convert(d) for d in shape]
| apache-2.0 |
pivie/cctools | galaxy/makeflow_gatk_wrapper.py | 9 | 3066 | #!/usr/bin/env cctools_python
# CCTOOLS_PYTHON_VERSION 2.7 2.6
#
#Copyright (C) 2013- The University of Notre Dame
#This software is distributed under the GNU General Public License.
#See the file COPYING for details.
#
# This program implements a way to organize and manage a large number of
# concurrently running GATK instances
# Author: Nick Hazekamp
# Date: 09/03/2013
import optparse, os, sys, tempfile, shutil, stat
class PassThroughParser(optparse.OptionParser):
def _process_args(self, largs, rargs, values):
while rargs:
try:
optparse.OptionParser._process_args(self,largs,rargs,values)
except (optparse.BadOptionError,optparse.AmbiguousOptionError), e:
largs.append(e.opt_str)
#Parse Command Line
parser = PassThroughParser()
parser.add_option('-T',dest='type',type="string")
parser.add_option('--input_file',dest='input',type="string")
parser.add_option('--reference_sequence',dest='ref',type="string")
parser.add_option('--log_to_file',dest='log',type="string")
parser.add_option('--out',dest='output',type="string")
parser.add_option('--mf_log',dest='mflog',type="string",help="Makeflow Log Location")
parser.add_option('--output_dblog',dest='dblog',type="string",help="Makeflow Debug Log Location")
parser.add_option('--wq_log',dest='wqlog',type="string",help="Work Queue Log Location")
parser.add_option('--pwfile',dest='pwfile',type='string')
parser.add_option('--user_id',dest='uid',type='string')
parser.add_option('--user_job',dest='ujob',type='string')
(options, args) = parser.parse_args()
# SETUP ENVIRONMENT VARIABLES
cur_dir = os.getcwd()
job_num = os.path.basename(cur_dir)
cctools_dir = options.cctools
makeflow='Makeflow'
wq_project_name="galaxy_gatk_"+options.uid+"_"+job_num
wq_password=options.pwfile
output_vcf = "output_VCF"
output_log = "output_log"
makeflow_log = "makeflow_log"
makeflow_graph = "makeflow_graph.eps"
wq_log = "wq_log"
wq_graph = "wq_graph.eps"
debug_log = "debug_log"
output_err = "output_err"
# MOVE FILES TO ENV
os.symlink(options.ref, cur_dir+"/reference.fa")
inputs = "--reference_sequence reference.fa --reference_index reference.fa.fai --reference_dict reference.dict "
os.symlink(options.input, cur_dir+"/cur_bam.bam")
inputs += "--input_file cur_bam.bam "
os.system("makeflow_gatk -T {0} {1} --makeflow {2} --out {3} {4} {5}".format(
options.type, inputs, makeflow, output_vcf, ' '.join(args), debug_log))
os.system("makeflow -T wq -N {0} -p 0 -l {1} -L {2} -d all -o {3} --password {4} &> {5}".format(
wq_project_name, makeflow_log, wq_log, debug_log, options.pwfile, debug_log)
if options.dblog:
shutil.copyfile(debug_log, options.dblog)
if options.mflog:
shutil.copyfile(makeflow_log, options.mflog)
if options.wqlog:
shutil.copyfile(wq_log, options.wqlog)
shutil.copyfile(output_vcf, options.output)
os.system(cctools_dir+'/bin/makeflow -c')
os.remove("./reference.*")
os.remove("./cur_bam.bam")
os.remove("./samtools")
os.remove("./GenomeAnalysisTK.jar")
os.remove("./picard.jar")
os.remove("./jre")
| gpl-2.0 |
gameduell/duell | bin/win/python2.7.9/Lib/distutils/tests/test_build_py.py | 46 | 5064 | """Tests for distutils.command.build_py."""
import os
import sys
import StringIO
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
from test.test_support import run_unittest
class BuildPyTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_package_data(self):
sources = self.mkdtemp()
f = open(os.path.join(sources, "__init__.py"), "w")
try:
f.write("# Pretend this is a package.")
finally:
f.close()
f = open(os.path.join(sources, "README.txt"), "w")
try:
f.write("Info about this package")
finally:
f.close()
destination = self.mkdtemp()
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": sources}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
force=0,
build_lib=destination)
dist.packages = ["pkg"]
dist.package_data = {"pkg": ["README.txt"]}
dist.package_dir = {"pkg": sources}
cmd = build_py(dist)
cmd.compile = 1
cmd.ensure_finalized()
self.assertEqual(cmd.package_data, dist.package_data)
cmd.run()
# This makes sure the list of outputs includes byte-compiled
# files for Python modules but not for package data files
# (there shouldn't *be* byte-code files for those!).
#
self.assertEqual(len(cmd.get_outputs()), 3)
pkgdest = os.path.join(destination, "pkg")
files = os.listdir(pkgdest)
self.assertIn("__init__.py", files)
self.assertIn("README.txt", files)
# XXX even with -O, distutils writes pyc, not pyo; bug?
if sys.dont_write_bytecode:
self.assertNotIn("__init__.pyc", files)
else:
self.assertIn("__init__.pyc", files)
def test_empty_package_dir(self):
# See SF 1668596/1720897.
cwd = os.getcwd()
# create the distribution files.
sources = self.mkdtemp()
open(os.path.join(sources, "__init__.py"), "w").close()
testdir = os.path.join(sources, "doc")
os.mkdir(testdir)
open(os.path.join(testdir, "testfile"), "w").close()
os.chdir(sources)
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
try:
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": ""},
"package_data": {"pkg": ["doc/*"]}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data test when package_dir is ''")
finally:
# Restore state.
os.chdir(cwd)
sys.stdout = old_stdout
def test_dir_in_package_data(self):
"""
A directory in package_data should not be added to the filelist.
"""
# See bug 19286
sources = self.mkdtemp()
pkg_dir = os.path.join(sources, "pkg")
os.mkdir(pkg_dir)
open(os.path.join(pkg_dir, "__init__.py"), "w").close()
docdir = os.path.join(pkg_dir, "doc")
os.mkdir(docdir)
open(os.path.join(docdir, "testfile"), "w").close()
# create the directory that could be incorrectly detected as a file
os.mkdir(os.path.join(docdir, 'otherdir'))
os.chdir(sources)
dist = Distribution({"packages": ["pkg"],
"package_data": {"pkg": ["doc/*"]}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data when data dir includes a dir")
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = build_py(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertIn('byte-compiling is disabled', self.logs[0][1])
def test_suite():
return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| bsd-2-clause |
chippey/gaffer | python/GafferDispatch/Wedge.py | 1 | 5185 | ##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import IECore
import Gaffer
import GafferDispatch
class Wedge( GafferDispatch.TaskContextProcessor ) :
Mode = IECore.Enum.create( "FloatRange", "IntRange", "ColorRange", "FloatList", "IntList", "StringList" )
def __init__( self, name = "Wedge" ) :
GafferDispatch.TaskContextProcessor.__init__( self, name )
self["variable"] = Gaffer.StringPlug( defaultValue = "wedge:value" )
self["indexVariable"] = Gaffer.StringPlug( defaultValue = "wedge:index" )
self["mode"] = Gaffer.IntPlug(
defaultValue = int( self.Mode.FloatRange ),
minValue = int( self.Mode.FloatRange ),
maxValue = int( self.Mode.StringList ),
)
# float range
self["floatMin"] = Gaffer.FloatPlug( defaultValue = 0 )
self["floatMax"] = Gaffer.FloatPlug( defaultValue = 1 )
self["floatSteps"] = Gaffer.IntPlug( minValue = 2, defaultValue = 11 )
# int range
self["intMin"] = Gaffer.IntPlug( defaultValue = 0 )
self["intMax"] = Gaffer.IntPlug( defaultValue = 5 )
self["intStep"] = Gaffer.IntPlug( minValue = 1, defaultValue = 1 )
# color range
self["ramp"] = Gaffer.SplinefColor3fPlug(
defaultValue = IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 0 ) ),
( 0, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 1 ) ),
( 1, IECore.Color3f( 1 ) ),
)
)
)
self["colorSteps"] = Gaffer.IntPlug( defaultValue = 5, minValue = 2 )
# lists
self["floats"] = Gaffer.FloatVectorDataPlug( defaultValue = IECore.FloatVectorData() )
self["ints"] = Gaffer.IntVectorDataPlug( defaultValue = IECore.IntVectorData() )
self["strings"] = Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() )
def values( self ) :
mode = self.Mode( self["mode"].getValue() )
if mode == self.Mode.FloatRange :
min = self["floatMin"].getValue()
max = self["floatMax"].getValue()
steps = self["floatSteps"].getValue()
values = []
for i in range( 0, steps ) :
t = float( i ) / ( steps - 1 )
values.append( min + t * ( max - min ) )
elif mode == self.Mode.IntRange :
min = self["intMin"].getValue()
max = self["intMax"].getValue()
step = self["intStep"].getValue()
if max < min :
min, max = max, min
if step == 0 :
raise RuntimeError( "Invalid step - step must not be 0" )
elif step < 0 :
step = -step
values = []
while True :
value = min + len( values ) * step
if value > max :
break
values.append( value )
elif mode == self.Mode.ColorRange :
spline = self["ramp"].getValue()
steps = self["colorSteps"].getValue()
values = [ spline( i / float( steps - 1 ) ) for i in range( 0, steps ) ]
elif mode == self.Mode.FloatList :
values = self["floats"].getValue()
elif mode == self.Mode.IntList :
values = self["ints"].getValue()
elif mode == self.Mode.StringList :
values = self["strings"].getValue()
return values
def _processedContexts( self, context ) :
# make a context for each of the wedge values
variable = self["variable"].getValue()
indexVariable = self["indexVariable"].getValue()
contexts = []
for index, value in enumerate( self.values() ) :
contexts.append( Gaffer.Context( context ) )
contexts[-1][variable] = value
contexts[-1][indexVariable] = index
return contexts
IECore.registerRunTimeTyped( Wedge, typeName = "GafferDispatch::Wedge" )
| bsd-3-clause |
YueLinHo/Subversion | tools/dev/wc-ng/count-progress.py | 7 | 3223 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
import os, sys
SKIP = ['deprecated.c',
'entries.c',
'entries.h',
'old-and-busted.c']
TERMS = ['svn_wc_adm_access_t',
'svn_wc_entry_t',
'svn_wc__node_',
'svn_wc__db_temp_',
'svn_wc__db_node_hidden',
'svn_wc__loggy',
'svn_wc__db_wq_add',
]
def get_files_in(path):
names = os.listdir(path)
for skip in SKIP:
try:
names.remove(skip)
except ValueError:
pass
return [os.path.join(path, fname) for fname in names
if fname.endswith('.c') or fname.endswith('.h')]
def count_terms_in(path):
files = get_files_in(path)
counts = {}
for term in TERMS:
counts[term] = 0
for filepath in get_files_in(path):
contents = open(filepath).read()
for term in TERMS:
counts[term] += contents.count(term)
return counts
def print_report(wcroot):
client = count_terms_in(os.path.join(wcroot, 'subversion', 'libsvn_client'))
wc = count_terms_in(os.path.join(wcroot, 'subversion', 'libsvn_wc'))
client_total = 0
wc_total = 0
FMT = '%22s |%14s |%10s |%6s'
SEP = '%s+%s+%s+%s' % (23*'-', 15*'-', 11*'-', 7*'-')
print(FMT % ('', 'libsvn_client', 'libsvn_wc', 'Total'))
print(SEP)
for term in TERMS:
print(FMT % (term, client[term], wc[term], client[term] + wc[term]))
client_total += client[term]
wc_total += wc[term]
print(SEP)
print(FMT % ('Total', client_total, wc_total, client_total + wc_total))
def usage():
print("""\
Usage: %s [WCROOT]
%s --help
Show statistics related to outstanding WC-NG code conversion work
items in working copy branch root WCROOT. If WCROOT is omitted, this
program will attempt to guess it using the assumption that it is being
run from within the working copy of interest."""
% (sys.argv[0], sys.argv[0]))
sys.exit(0)
if __name__ == '__main__':
if len(sys.argv) > 1:
if '--help' in sys.argv[1:]:
usage()
print_report(sys.argv[1])
else:
cwd = os.path.abspath(os.getcwd())
idx = cwd.rfind(os.sep + 'subversion')
if idx > 0:
wcroot = cwd[:idx]
else:
idx = cwd.rfind(os.sep + 'tools')
if idx > 0:
wcroot = cwd[:idx]
elif os.path.exists(os.path.join(cwd, 'subversion')):
wcroot = cwd
else:
print("ERROR: the root of 'trunk' cannot be located -- please provide")
sys.exit(1)
print_report(wcroot)
| apache-2.0 |
peiyuwang/pants | src/python/pants/reporting/reporter.py | 26 | 3119 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import namedtuple
from pants.reporting.report import Report
class Reporter(object):
"""Formats and emits reports.
Subclasses implement the callback methods, to provide specific reporting
functionality, e.g., to console or to browser.
"""
# Generic reporting settings.
# log_level: Display log messages up to this level.
# subsettings: subclass-specific settings.
Settings = namedtuple('Settings', ['log_level'])
def __init__(self, run_tracker, settings):
self.run_tracker = run_tracker
self.settings = settings
def open(self):
"""Begin the report."""
pass
def close(self):
"""End the report."""
pass
def start_workunit(self, workunit):
"""A new workunit has started."""
pass
def end_workunit(self, workunit):
"""A workunit has finished."""
pass
def handle_log(self, workunit, level, *msg_elements):
"""Handle a message logged by pants code.
level: One of the constants above.
Each element in msg_elements is either a message or a (message, detail) pair.
A subclass must show the message, but may choose to show the detail in some
sensible way (e.g., when the message text is clicked on in a browser).
This convenience implementation filters by log level and then delegates to do_handle_log.
"""
if level <= self.level_for_workunit(workunit, self.settings.log_level):
self.do_handle_log(workunit, level, *msg_elements)
def do_handle_log(self, workunit, level, *msg_elements):
"""Handle a message logged by pants code, after it's passed the log level check."""
pass
def handle_output(self, workunit, label, s):
"""Handle output captured from an invoked tool (e.g., javac).
workunit: The innermost WorkUnit in which the tool was invoked.
label: Classifies the output e.g., 'stdout' for output captured from a tool's stdout or
'debug' for debug output captured from a tool's logfiles.
s: The content captured.
"""
pass
def is_under_main_root(self, workunit):
"""Is the workunit running under the main thread's root."""
return self.run_tracker.is_under_main_root(workunit)
def level_for_workunit(self, workunit, default_level):
if workunit.log_config and workunit.log_config.level:
# The value of the level option is a string defined in global_options.py
if workunit.log_config.level == 'warn':
return Report.WARN
if workunit.log_config.level == 'debug':
return Report.DEBUG
if workunit.log_config.level == 'info':
return Report.INFO
return default_level
def use_color_for_workunit(self, workunit, default_use_colors):
if workunit.log_config and workunit.log_config.colors is not None:
return workunit.log_config.colors
return default_use_colors
| apache-2.0 |
hieukypc/ERP | openerp/addons/hr_expense/report/hr_expense_report.py | 29 | 2924 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import osv
from openerp.tools.misc import formatLang
from openerp.tools.translate import _
from openerp.report import report_sxw
from openerp.exceptions import UserError
class report_expense(report_sxw.rml_parse):
def set_context(self, objects, data, ids, report_type=None):
res = super(report_expense, self).set_context(objects, data, ids, report_type=report_type)
hr_expense_obj = self.pool.get('hr.expense')
context = {'lang': self.pool.get('res.users').browse(self.cr, self.uid, self.uid).lang}
state_field = hr_expense_obj.fields_get(self.cr, self.uid, 'state', context=context)['state']['selection']
state_dict = {}
for state_tuple in state_field:
state_dict[state_tuple[0]] = state_tuple[1]
expenses_info = {}
expenses = hr_expense_obj.search(self.cr, self.uid, [('id', 'in', ids)], order="employee_id, currency_id, state, date")
for expense in hr_expense_obj.browse(self.cr, self.uid, expenses):
key = expense.employee_id.name + '-' + expense.currency_id.name + '-' + expense.state
if expenses_info.get(key):
expenses_info[key]['lines'] += expense
expenses_info[key]['total_amount'] += expense.total_amount
else:
expenses_info[key] = {
'employee_name': expense.employee_id.name,
'total_amount': expense.total_amount,
'lines': expense,
'currency': expense.currency_id,
'validator_name': expense.employee_id.parent_id.name,
'notes': [],
'notes_index': {},
'state': state_dict[expense.state],
}
if expense.description:
index = len(expenses_info[key]['notes']) + 1
expenses_info[key]['notes'].append({'description': expense.description, 'index':index})
expenses_info[key]['notes_index'][expense.id] = index
# Qweb for-each do not work on dict, so we send a list and we sort it by the name of the employee
# that way if we have two sheet for the same employee they will follow in the report
self.localcontext.update({
'get_expenses': lambda : [v for k,v in sorted(expenses_info.items())],
})
return res
class report_hr_expense(osv.AbstractModel):
_name = 'report.hr_expense.report_expense'
_inherit = 'report.abstract_report'
_template = 'hr_expense.report_expense'
_wrapped_report_class = report_expense
| gpl-3.0 |
aidanhs/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/example/benchmark_helper_wsh.py | 451 | 3234 | # Copyright 2013, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Handler for benchmark.html."""
def web_socket_do_extra_handshake(request):
# Turn off compression.
request.ws_extension_processors = []
def web_socket_transfer_data(request):
data = ''
while True:
command = request.ws_stream.receive_message()
if command is None:
return
if not isinstance(command, unicode):
raise ValueError('Invalid command data:' + command)
commands = command.split(' ')
if len(commands) == 0:
raise ValueError('Invalid command data: ' + command)
if commands[0] == 'receive':
if len(commands) != 2:
raise ValueError(
'Illegal number of arguments for send command' +
command)
size = int(commands[1])
# Reuse data if possible.
if len(data) != size:
data = 'a' * size
request.ws_stream.send_message(data, binary=True)
elif commands[0] == 'send':
if len(commands) != 2:
raise ValueError(
'Illegal number of arguments for receive command' +
command)
verify_data = commands[1] == '1'
data = request.ws_stream.receive_message()
if data is None:
raise ValueError('Payload not received')
size = len(data)
if verify_data:
if data != 'a' * size:
raise ValueError('Payload verification failed')
request.ws_stream.send_message(str(size))
else:
raise ValueError('Invalid command: ' + commands[0])
# vi:sts=4 sw=4 et
| mpl-2.0 |
liampauling/betfair | tests/test_scoresresources.py | 2 | 1117 | import unittest
from betfairlightweight import resources
from tests.tools import create_mock_json
class ScoreResourcesTest(unittest.TestCase):
def test_racedetails(self):
mock_response = create_mock_json("tests/resources/racedetails.json")
resource = resources.RaceDetails(**mock_response.json())
assert isinstance(resource, resources.RaceDetails)
# def test_score(self):
# mock_response = create_mock_json('tests/resources/score.json')
# resource = resources.Score(**mock_response.json())
#
# assert isinstance(resource, resources.Score)
#
# def test_incidents(self):
# mock_response = create_mock_json('tests/resources/incidents.json')
# resource = resources.Incidents(**mock_response.json())
#
# assert isinstance(resource, resources.Incidents)
#
# def test_available_events(self):
# mock_response = create_mock_json('tests/resources/availableevents.json')
# resource = resources.AvailableEvent(**mock_response.json())
#
# assert isinstance(resource, resources.AvailableEvent)
| mit |
oubiwann/metaphone | metaphone/word.py | 1 | 1414 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unicodedata
class Word(object):
"""
"""
def __init__(self, input):
self.original = input
if isinstance(input, bytes):
self.decoded = input.decode('utf-8', 'ignore')
else:
self.decoded = input
self.decoded = self.decoded.replace('\xc7', "s")
self.decoded = self.decoded.replace('\xe7', "s")
self.normalized = ''.join(
(c for c in unicodedata.normalize('NFD', self.decoded)
if unicodedata.category(c) != 'Mn'))
self.upper = self.normalized.upper()
self.length = len(self.upper)
self.prepad = " "
self.start_index = len(self.prepad)
self.end_index = self.start_index + self.length - 1
self.postpad = " "
# so we can index beyond the begining and end of the input string
self.buffer = self.prepad + self.upper + self.postpad
@property
def is_slavo_germanic(self):
return (
self.upper.find('W') > -1
or self.upper.find('K') > -1
or self.upper.find('CZ') > -1
or self.upper.find('WITZ') > -1)
def get_letters(self, start=0, end=None):
if not end:
end = start + 1
start = self.start_index + start
end = self.start_index + end
return self.buffer[start:end]
| bsd-3-clause |
shootstar/novatest | nova/tests/virt/baremetal/test_utils.py | 16 | 2105 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal utils."""
import errno
import os
from nova import test
from nova.virt.baremetal import utils
class BareMetalUtilsTestCase(test.TestCase):
def test_random_alnum(self):
s = utils.random_alnum(10)
self.assertEqual(len(s), 10)
s = utils.random_alnum(100)
self.assertEqual(len(s), 100)
def test_unlink(self):
self.mox.StubOutWithMock(os, "unlink")
os.unlink("/fake/path")
self.mox.ReplayAll()
utils.unlink_without_raise("/fake/path")
self.mox.VerifyAll()
def test_unlink_ENOENT(self):
self.mox.StubOutWithMock(os, "unlink")
os.unlink("/fake/path").AndRaise(OSError(errno.ENOENT))
self.mox.ReplayAll()
utils.unlink_without_raise("/fake/path")
self.mox.VerifyAll()
def test_create_link(self):
self.mox.StubOutWithMock(os, "symlink")
os.symlink("/fake/source", "/fake/link")
self.mox.ReplayAll()
utils.create_link_without_raise("/fake/source", "/fake/link")
self.mox.VerifyAll()
def test_create_link_EEXIST(self):
self.mox.StubOutWithMock(os, "symlink")
os.symlink("/fake/source", "/fake/link").AndRaise(
OSError(errno.EEXIST))
self.mox.ReplayAll()
utils.create_link_without_raise("/fake/source", "/fake/link")
self.mox.VerifyAll()
| apache-2.0 |
dennisss/sympy | sympy/logic/tests/test_inference.py | 15 | 9755 | """For more tests on satisfiability, see test_dimacs"""
from sympy import symbols, Q
from sympy.logic.boolalg import And, Implies, Equivalent, true, false
from sympy.logic.inference import literal_symbol, \
pl_true, satisfiable, valid, entails, PropKB
from sympy.logic.algorithms.dpll import dpll, dpll_satisfiable, \
find_pure_symbol, find_unit_clause, unit_propagate, \
find_pure_symbol_int_repr, find_unit_clause_int_repr, \
unit_propagate_int_repr
from sympy.utilities.pytest import raises
def test_literal():
A, B = symbols('A,B')
assert literal_symbol(True) is True
assert literal_symbol(False) is False
assert literal_symbol(A) is A
assert literal_symbol(~A) is A
def test_find_pure_symbol():
A, B, C = symbols('A,B,C')
assert find_pure_symbol([A], [A]) == (A, True)
assert find_pure_symbol([A, B], [~A | B, ~B | A]) == (None, None)
assert find_pure_symbol([A, B, C], [ A | ~B, ~B | ~C, C | A]) == (A, True)
assert find_pure_symbol([A, B, C], [~A | B, B | ~C, C | A]) == (B, True)
assert find_pure_symbol([A, B, C], [~A | ~B, ~B | ~C, C | A]) == (B, False)
assert find_pure_symbol(
[A, B, C], [~A | B, ~B | ~C, C | A]) == (None, None)
def test_find_pure_symbol_int_repr():
assert find_pure_symbol_int_repr([1], [set([1])]) == (1, True)
assert find_pure_symbol_int_repr([1, 2],
[set([-1, 2]), set([-2, 1])]) == (None, None)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([1, -2]), set([-2, -3]), set([3, 1])]) == (1, True)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([-1, 2]), set([2, -3]), set([3, 1])]) == (2, True)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([-1, -2]), set([-2, -3]), set([3, 1])]) == (2, False)
assert find_pure_symbol_int_repr([1, 2, 3],
[set([-1, 2]), set([-2, -3]), set([3, 1])]) == (None, None)
def test_unit_clause():
A, B, C = symbols('A,B,C')
assert find_unit_clause([A], {}) == (A, True)
assert find_unit_clause([A, ~A], {}) == (A, True) # Wrong ??
assert find_unit_clause([A | B], {A: True}) == (B, True)
assert find_unit_clause([A | B], {B: True}) == (A, True)
assert find_unit_clause(
[A | B | C, B | ~C, A | ~B], {A: True}) == (B, False)
assert find_unit_clause([A | B | C, B | ~C, A | B], {A: True}) == (B, True)
assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True)
def test_unit_clause_int_repr():
assert find_unit_clause_int_repr(map(set, [[1]]), {}) == (1, True)
assert find_unit_clause_int_repr(map(set, [[1], [-1]]), {}) == (1, True)
assert find_unit_clause_int_repr([set([1, 2])], {1: True}) == (2, True)
assert find_unit_clause_int_repr([set([1, 2])], {2: True}) == (1, True)
assert find_unit_clause_int_repr(map(set,
[[1, 2, 3], [2, -3], [1, -2]]), {1: True}) == (2, False)
assert find_unit_clause_int_repr(map(set,
[[1, 2, 3], [3, -3], [1, 2]]), {1: True}) == (2, True)
A, B, C = symbols('A,B,C')
assert find_unit_clause([A | B | C, B | ~C, A ], {}) == (A, True)
def test_unit_propagate():
A, B, C = symbols('A,B,C')
assert unit_propagate([A | B], A) == []
assert unit_propagate([A | B, ~A | C, ~C | B, A], A) == [C, ~C | B, A]
def test_unit_propagate_int_repr():
assert unit_propagate_int_repr([set([1, 2])], 1) == []
assert unit_propagate_int_repr(map(set,
[[1, 2], [-1, 3], [-3, 2], [1]]), 1) == [set([3]), set([-3, 2])]
def test_dpll():
"""This is also tested in test_dimacs"""
A, B, C = symbols('A,B,C')
assert dpll([A | B], [A, B], {A: True, B: True}) == {A: True, B: True}
def test_dpll_satisfiable():
A, B, C = symbols('A,B,C')
assert dpll_satisfiable( A & ~A ) is False
assert dpll_satisfiable( A & ~B ) == {A: True, B: False}
assert dpll_satisfiable(
A | B ) in ({A: True}, {B: True}, {A: True, B: True})
assert dpll_satisfiable(
(~A | B) & (~B | A) ) in ({A: True, B: True}, {A: False, B: False})
assert dpll_satisfiable( (A | B) & (~B | C) ) in ({A: True, B: False},
{A: True, C: True}, {B: True, C: True})
assert dpll_satisfiable( A & B & C ) == {A: True, B: True, C: True}
assert dpll_satisfiable( (A | B) & (A >> B) ) == {B: True}
assert dpll_satisfiable( Equivalent(A, B) & A ) == {A: True, B: True}
assert dpll_satisfiable( Equivalent(A, B) & ~A ) == {A: False, B: False}
def test_satisfiable():
A, B, C = symbols('A,B,C')
assert satisfiable(A & (A >> B) & ~B) is False
def test_valid():
A, B, C = symbols('A,B,C')
assert valid(A >> (B >> A)) is True
assert valid((A >> (B >> C)) >> ((A >> B) >> (A >> C))) is True
assert valid((~B >> ~A) >> (A >> B)) is True
assert valid(A | B | C) is False
assert valid(A >> B) is False
def test_pl_true():
A, B, C = symbols('A,B,C')
assert pl_true(True) is True
assert pl_true( A & B, {A: True, B: True}) is True
assert pl_true( A | B, {A: True}) is True
assert pl_true( A | B, {B: True}) is True
assert pl_true( A | B, {A: None, B: True}) is True
assert pl_true( A >> B, {A: False}) is True
assert pl_true( A | B | ~C, {A: False, B: True, C: True}) is True
assert pl_true(Equivalent(A, B), {A: False, B: False}) is True
# test for false
assert pl_true(False) is False
assert pl_true( A & B, {A: False, B: False}) is False
assert pl_true( A & B, {A: False}) is False
assert pl_true( A & B, {B: False}) is False
assert pl_true( A | B, {A: False, B: False}) is False
#test for None
assert pl_true(B, {B: None}) is None
assert pl_true( A & B, {A: True, B: None}) is None
assert pl_true( A >> B, {A: True, B: None}) is None
assert pl_true(Equivalent(A, B), {A: None}) is None
assert pl_true(Equivalent(A, B), {A: True, B: None}) is None
# Test for deep
assert pl_true(A | B, {A: False}, deep=True) is None
assert pl_true(~A & ~B, {A: False}, deep=True) is None
assert pl_true(A | B, {A: False, B: False}, deep=True) is False
assert pl_true(A & B & (~A | ~B), {A: True}, deep=True) is False
assert pl_true((C >> A) >> (B >> A), {C: True}, deep=True) is True
def test_pl_true_wrong_input():
from sympy import pi
raises(ValueError, lambda: pl_true('John Cleese'))
raises(ValueError, lambda: pl_true(42 + pi + pi ** 2))
raises(ValueError, lambda: pl_true(42))
def test_entails():
A, B, C = symbols('A, B, C')
assert entails(A, [A >> B, ~B]) is False
assert entails(B, [Equivalent(A, B), A]) is True
assert entails((A >> B) >> (~A >> ~B)) is False
assert entails((A >> B) >> (~B >> ~A)) is True
def test_PropKB():
A, B, C = symbols('A,B,C')
kb = PropKB()
assert kb.ask(A >> B) is False
assert kb.ask(A >> (B >> A)) is True
kb.tell(A >> B)
kb.tell(B >> C)
assert kb.ask(A) is False
assert kb.ask(B) is False
assert kb.ask(C) is False
assert kb.ask(~A) is False
assert kb.ask(~B) is False
assert kb.ask(~C) is False
assert kb.ask(A >> C) is True
kb.tell(A)
assert kb.ask(A) is True
assert kb.ask(B) is True
assert kb.ask(C) is True
assert kb.ask(~C) is False
kb.retract(A)
assert kb.ask(C) is False
def test_propKB_tolerant():
""""tolerant to bad input"""
kb = PropKB()
A, B, C = symbols('A,B,C')
assert kb.ask(B) is False
def test_satisfiable_non_symbols():
x, y = symbols('x y')
assumptions = Q.zero(x*y)
facts = Implies(Q.zero(x*y), Q.zero(x) | Q.zero(y))
query = ~Q.zero(x) & ~Q.zero(y)
refutations = [
{Q.zero(x): True, Q.zero(x*y): True},
{Q.zero(y): True, Q.zero(x*y): True},
{Q.zero(x): True, Q.zero(y): True, Q.zero(x*y): True},
{Q.zero(x): True, Q.zero(y): False, Q.zero(x*y): True},
{Q.zero(x): False, Q.zero(y): True, Q.zero(x*y): True}]
assert not satisfiable(And(assumptions, facts, query), algorithm='dpll')
assert satisfiable(And(assumptions, facts, ~query), algorithm='dpll') in refutations
assert not satisfiable(And(assumptions, facts, query), algorithm='dpll2')
assert satisfiable(And(assumptions, facts, ~query), algorithm='dpll2') in refutations
def test_satisfiable_bool():
from sympy.core.singleton import S
assert satisfiable(true) == {true: true}
assert satisfiable(S.true) == {true: true}
assert satisfiable(false) is False
assert satisfiable(S.false) is False
def test_satisfiable_all_models():
from sympy.abc import A, B
assert next(satisfiable(False, all_models=True)) is False
assert list(satisfiable((A >> ~A) & A , all_models=True)) == [False]
assert list(satisfiable(True, all_models=True)) == [{true: true}]
models = [{A: True, B: False}, {A: False, B: True}]
result = satisfiable(A ^ B, all_models=True)
models.remove(next(result))
models.remove(next(result))
raises(StopIteration, lambda: next(result))
assert not models
assert list(satisfiable(Equivalent(A, B), all_models=True)) == \
[{A: False, B: False}, {A: True, B: True}]
models = [{A: False, B: False}, {A: False, B: True}, {A: True, B: True}]
for model in satisfiable(A >> B, all_models=True):
models.remove(model)
assert not models
# This is a santiy test to check that only the required number
# of solutions are generated. The expr below has 2**100 - 1 models
# which would time out the test if all are generated at once.
from sympy import numbered_symbols
from sympy.logic.boolalg import Or
sym = numbered_symbols()
X = [next(sym) for i in range(100)]
result = satisfiable(Or(*X), all_models=True)
for i in range(10):
assert next(result)
| bsd-3-clause |
botify-labs/moto | tests/test_cloudformation/fixtures/route53_health_check.py | 8 | 1122 | from __future__ import unicode_literals
template = {
"Resources": {
"HostedZone": {
"Type": "AWS::Route53::HostedZone",
"Properties": {
"Name": "my_zone"
}
},
"my_health_check": {
"Type": "AWS::Route53::HealthCheck",
"Properties": {
"HealthCheckConfig": {
"FailureThreshold": 3,
"IPAddress": "10.0.0.4",
"Port": 80,
"RequestInterval": 10,
"ResourcePath": "/",
"Type": "HTTP",
}
}
},
"myDNSRecord": {
"Type": "AWS::Route53::RecordSet",
"Properties": {
"HostedZoneId": {"Ref": "HostedZone"},
"Comment": "DNS name for my instance.",
"Name": "my_record_set",
"Type": "A",
"TTL": "900",
"ResourceRecords": ["my.example.com"],
"HealthCheckId": {"Ref": "my_health_check"},
}
}
},
}
| apache-2.0 |
FHannes/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/textio.py | 102 | 62691 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Functions for text input, logging or text output.
@group Helpers:
HexDump,
HexInput,
HexOutput,
Color,
Table,
Logger
DebugLog
CrashDump
"""
__revision__ = "$Id$"
__all__ = [
'HexDump',
'HexInput',
'HexOutput',
'Color',
'Table',
'CrashDump',
'DebugLog',
'Logger',
]
import sys
from winappdbg import win32
from winappdbg import compat
from winappdbg.util import StaticClass
import re
import time
import struct
import traceback
#------------------------------------------------------------------------------
class HexInput (StaticClass):
"""
Static functions for user input parsing.
The counterparts for each method are in the L{HexOutput} class.
"""
@staticmethod
def integer(token):
"""
Convert numeric strings into integers.
@type token: str
@param token: String to parse.
@rtype: int
@return: Parsed integer value.
"""
token = token.strip()
neg = False
if token.startswith(compat.b('-')):
token = token[1:]
neg = True
if token.startswith(compat.b('0x')):
result = int(token, 16) # hexadecimal
elif token.startswith(compat.b('0b')):
result = int(token[2:], 2) # binary
elif token.startswith(compat.b('0o')):
result = int(token, 8) # octal
else:
try:
result = int(token) # decimal
except ValueError:
result = int(token, 16) # hexadecimal (no "0x" prefix)
if neg:
result = -result
return result
@staticmethod
def address(token):
"""
Convert numeric strings into memory addresses.
@type token: str
@param token: String to parse.
@rtype: int
@return: Parsed integer value.
"""
return int(token, 16)
@staticmethod
def hexadecimal(token):
"""
Convert a strip of hexadecimal numbers into binary data.
@type token: str
@param token: String to parse.
@rtype: str
@return: Parsed string value.
"""
token = ''.join([ c for c in token if c.isalnum() ])
if len(token) % 2 != 0:
raise ValueError("Missing characters in hex data")
data = ''
for i in compat.xrange(0, len(token), 2):
x = token[i:i+2]
d = int(x, 16)
s = struct.pack('<B', d)
data += s
return data
@staticmethod
def pattern(token):
"""
Convert an hexadecimal search pattern into a POSIX regular expression.
For example, the following pattern::
"B8 0? ?0 ?? ??"
Would match the following data::
"B8 0D F0 AD BA" # mov eax, 0xBAADF00D
@type token: str
@param token: String to parse.
@rtype: str
@return: Parsed string value.
"""
token = ''.join([ c for c in token if c == '?' or c.isalnum() ])
if len(token) % 2 != 0:
raise ValueError("Missing characters in hex data")
regexp = ''
for i in compat.xrange(0, len(token), 2):
x = token[i:i+2]
if x == '??':
regexp += '.'
elif x[0] == '?':
f = '\\x%%.1x%s' % x[1]
x = ''.join([ f % c for c in compat.xrange(0, 0x10) ])
regexp = '%s[%s]' % (regexp, x)
elif x[1] == '?':
f = '\\x%s%%.1x' % x[0]
x = ''.join([ f % c for c in compat.xrange(0, 0x10) ])
regexp = '%s[%s]' % (regexp, x)
else:
regexp = '%s\\x%s' % (regexp, x)
return regexp
@staticmethod
def is_pattern(token):
"""
Determine if the given argument is a valid hexadecimal pattern to be
used with L{pattern}.
@type token: str
@param token: String to parse.
@rtype: bool
@return:
C{True} if it's a valid hexadecimal pattern, C{False} otherwise.
"""
return re.match(r"^(?:[\?A-Fa-f0-9][\?A-Fa-f0-9]\s*)+$", token)
@classmethod
def integer_list_file(cls, filename):
"""
Read a list of integers from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- integers can be specified as:
- decimal numbers ("100" is 100)
- hexadecimal numbers ("0x100" is 256)
- binary numbers ("0b100" is 4)
- octal numbers ("0100" is 64)
@type filename: str
@param filename: Name of the file to read.
@rtype: list( int )
@return: List of integers read from the file.
"""
count = 0
result = list()
fd = open(filename, 'r')
for line in fd:
count = count + 1
if '#' in line:
line = line[ : line.find('#') ]
line = line.strip()
if line:
try:
value = cls.integer(line)
except ValueError:
e = sys.exc_info()[1]
msg = "Error in line %d of %s: %s"
msg = msg % (count, filename, str(e))
raise ValueError(msg)
result.append(value)
return result
@classmethod
def string_list_file(cls, filename):
"""
Read a list of string values from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- strings cannot span over a single line
@type filename: str
@param filename: Name of the file to read.
@rtype: list
@return: List of integers and strings read from the file.
"""
count = 0
result = list()
fd = open(filename, 'r')
for line in fd:
count = count + 1
if '#' in line:
line = line[ : line.find('#') ]
line = line.strip()
if line:
result.append(line)
return result
@classmethod
def mixed_list_file(cls, filename):
"""
Read a list of mixed values from a file.
The file format is:
- # anywhere in the line begins a comment
- leading and trailing spaces are ignored
- empty lines are ignored
- strings cannot span over a single line
- integers can be specified as:
- decimal numbers ("100" is 100)
- hexadecimal numbers ("0x100" is 256)
- binary numbers ("0b100" is 4)
- octal numbers ("0100" is 64)
@type filename: str
@param filename: Name of the file to read.
@rtype: list
@return: List of integers and strings read from the file.
"""
count = 0
result = list()
fd = open(filename, 'r')
for line in fd:
count = count + 1
if '#' in line:
line = line[ : line.find('#') ]
line = line.strip()
if line:
try:
value = cls.integer(line)
except ValueError:
value = line
result.append(value)
return result
#------------------------------------------------------------------------------
class HexOutput (StaticClass):
"""
Static functions for user output parsing.
The counterparts for each method are in the L{HexInput} class.
@type integer_size: int
@cvar integer_size: Default size in characters of an outputted integer.
This value is platform dependent.
@type address_size: int
@cvar address_size: Default Number of bits of the target architecture.
This value is platform dependent.
"""
integer_size = (win32.SIZEOF(win32.DWORD) * 2) + 2
address_size = (win32.SIZEOF(win32.SIZE_T) * 2) + 2
@classmethod
def integer(cls, integer, bits = None):
"""
@type integer: int
@param integer: Integer.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexOutput.integer_size}
@rtype: str
@return: Text output.
"""
if bits is None:
integer_size = cls.integer_size
else:
integer_size = (bits / 4) + 2
if integer >= 0:
return ('0x%%.%dx' % (integer_size - 2)) % integer
return ('-0x%%.%dx' % (integer_size - 2)) % -integer
@classmethod
def address(cls, address, bits = None):
"""
@type address: int
@param address: Memory address.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexOutput.address_size}
@rtype: str
@return: Text output.
"""
if bits is None:
address_size = cls.address_size
bits = win32.bits
else:
address_size = (bits / 4) + 2
if address < 0:
address = ((2 ** bits) - 1) ^ ~address
return ('0x%%.%dx' % (address_size - 2)) % address
@staticmethod
def hexadecimal(data):
"""
Convert binary data to a string of hexadecimal numbers.
@type data: str
@param data: Binary data.
@rtype: str
@return: Hexadecimal representation.
"""
return HexDump.hexadecimal(data, separator = '')
@classmethod
def integer_list_file(cls, filename, values, bits = None):
"""
Write a list of integers to a file.
If a file of the same name exists, it's contents are replaced.
See L{HexInput.integer_list_file} for a description of the file format.
@type filename: str
@param filename: Name of the file to write.
@type values: list( int )
@param values: List of integers to write to the file.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexOutput.integer_size}
"""
fd = open(filename, 'w')
for integer in values:
print >> fd, cls.integer(integer, bits)
fd.close()
@classmethod
def string_list_file(cls, filename, values):
"""
Write a list of strings to a file.
If a file of the same name exists, it's contents are replaced.
See L{HexInput.string_list_file} for a description of the file format.
@type filename: str
@param filename: Name of the file to write.
@type values: list( int )
@param values: List of strings to write to the file.
"""
fd = open(filename, 'w')
for string in values:
print >> fd, string
fd.close()
@classmethod
def mixed_list_file(cls, filename, values, bits):
"""
Write a list of mixed values to a file.
If a file of the same name exists, it's contents are replaced.
See L{HexInput.mixed_list_file} for a description of the file format.
@type filename: str
@param filename: Name of the file to write.
@type values: list( int )
@param values: List of mixed values to write to the file.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexOutput.integer_size}
"""
fd = open(filename, 'w')
for original in values:
try:
parsed = cls.integer(original, bits)
except TypeError:
parsed = repr(original)
print >> fd, parsed
fd.close()
#------------------------------------------------------------------------------
class HexDump (StaticClass):
"""
Static functions for hexadecimal dumps.
@type integer_size: int
@cvar integer_size: Size in characters of an outputted integer.
This value is platform dependent.
@type address_size: int
@cvar address_size: Size in characters of an outputted address.
This value is platform dependent.
"""
integer_size = (win32.SIZEOF(win32.DWORD) * 2)
address_size = (win32.SIZEOF(win32.SIZE_T) * 2)
@classmethod
def integer(cls, integer, bits = None):
"""
@type integer: int
@param integer: Integer.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.integer_size}
@rtype: str
@return: Text output.
"""
if bits is None:
integer_size = cls.integer_size
else:
integer_size = bits / 4
return ('%%.%dX' % integer_size) % integer
@classmethod
def address(cls, address, bits = None):
"""
@type address: int
@param address: Memory address.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text output.
"""
if bits is None:
address_size = cls.address_size
bits = win32.bits
else:
address_size = bits / 4
if address < 0:
address = ((2 ** bits) - 1) ^ ~address
return ('%%.%dX' % address_size) % address
@staticmethod
def printable(data):
"""
Replace unprintable characters with dots.
@type data: str
@param data: Binary data.
@rtype: str
@return: Printable text.
"""
result = ''
for c in data:
if 32 < ord(c) < 128:
result += c
else:
result += '.'
return result
@staticmethod
def hexadecimal(data, separator = ''):
"""
Convert binary data to a string of hexadecimal numbers.
@type data: str
@param data: Binary data.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each character.
@rtype: str
@return: Hexadecimal representation.
"""
return separator.join( [ '%.2x' % ord(c) for c in data ] )
@staticmethod
def hexa_word(data, separator = ' '):
"""
Convert binary data to a string of hexadecimal WORDs.
@type data: str
@param data: Binary data.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each WORD.
@rtype: str
@return: Hexadecimal representation.
"""
if len(data) & 1 != 0:
data += '\0'
return separator.join( [ '%.4x' % struct.unpack('<H', data[i:i+2])[0] \
for i in compat.xrange(0, len(data), 2) ] )
@staticmethod
def hexa_dword(data, separator = ' '):
"""
Convert binary data to a string of hexadecimal DWORDs.
@type data: str
@param data: Binary data.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each DWORD.
@rtype: str
@return: Hexadecimal representation.
"""
if len(data) & 3 != 0:
data += '\0' * (4 - (len(data) & 3))
return separator.join( [ '%.8x' % struct.unpack('<L', data[i:i+4])[0] \
for i in compat.xrange(0, len(data), 4) ] )
@staticmethod
def hexa_qword(data, separator = ' '):
"""
Convert binary data to a string of hexadecimal QWORDs.
@type data: str
@param data: Binary data.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each QWORD.
@rtype: str
@return: Hexadecimal representation.
"""
if len(data) & 7 != 0:
data += '\0' * (8 - (len(data) & 7))
return separator.join( [ '%.16x' % struct.unpack('<Q', data[i:i+8])[0]\
for i in compat.xrange(0, len(data), 8) ] )
@classmethod
def hexline(cls, data, separator = ' ', width = None):
"""
Dump a line of hexadecimal numbers from binary data.
@type data: str
@param data: Binary data.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each character.
@type width: int
@param width:
(Optional) Maximum number of characters to convert per text line.
This value is also used for padding.
@rtype: str
@return: Multiline output text.
"""
if width is None:
fmt = '%s %s'
else:
fmt = '%%-%ds %%-%ds' % ((len(separator)+2)*width-1, width)
return fmt % (cls.hexadecimal(data, separator), cls.printable(data))
@classmethod
def hexblock(cls, data, address = None,
bits = None,
separator = ' ',
width = 8):
"""
Dump a block of hexadecimal numbers from binary data.
Also show a printable text version of the data.
@type data: str
@param data: Binary data.
@type address: str
@param address: Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type separator: str
@param separator:
Separator between the hexadecimal representation of each character.
@type width: int
@param width:
(Optional) Maximum number of characters to convert per text line.
@rtype: str
@return: Multiline output text.
"""
return cls.hexblock_cb(cls.hexline, data, address, bits, width,
cb_kwargs = {'width' : width, 'separator' : separator})
@classmethod
def hexblock_cb(cls, callback, data, address = None,
bits = None,
width = 16,
cb_args = (),
cb_kwargs = {}):
"""
Dump a block of binary data using a callback function to convert each
line of text.
@type callback: function
@param callback: Callback function to convert each line of data.
@type data: str
@param data: Binary data.
@type address: str
@param address:
(Optional) Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type cb_args: str
@param cb_args:
(Optional) Arguments to pass to the callback function.
@type cb_kwargs: str
@param cb_kwargs:
(Optional) Keyword arguments to pass to the callback function.
@type width: int
@param width:
(Optional) Maximum number of bytes to convert per text line.
@rtype: str
@return: Multiline output text.
"""
result = ''
if address is None:
for i in compat.xrange(0, len(data), width):
result = '%s%s\n' % ( result, \
callback(data[i:i+width], *cb_args, **cb_kwargs) )
else:
for i in compat.xrange(0, len(data), width):
result = '%s%s: %s\n' % (
result,
cls.address(address, bits),
callback(data[i:i+width], *cb_args, **cb_kwargs)
)
address += width
return result
@classmethod
def hexblock_byte(cls, data, address = None,
bits = None,
separator = ' ',
width = 16):
"""
Dump a block of hexadecimal BYTEs from binary data.
@type data: str
@param data: Binary data.
@type address: str
@param address: Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type separator: str
@param separator:
Separator between the hexadecimal representation of each BYTE.
@type width: int
@param width:
(Optional) Maximum number of BYTEs to convert per text line.
@rtype: str
@return: Multiline output text.
"""
return cls.hexblock_cb(cls.hexadecimal, data,
address, bits, width,
cb_kwargs = {'separator': separator})
@classmethod
def hexblock_word(cls, data, address = None,
bits = None,
separator = ' ',
width = 8):
"""
Dump a block of hexadecimal WORDs from binary data.
@type data: str
@param data: Binary data.
@type address: str
@param address: Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type separator: str
@param separator:
Separator between the hexadecimal representation of each WORD.
@type width: int
@param width:
(Optional) Maximum number of WORDs to convert per text line.
@rtype: str
@return: Multiline output text.
"""
return cls.hexblock_cb(cls.hexa_word, data,
address, bits, width * 2,
cb_kwargs = {'separator': separator})
@classmethod
def hexblock_dword(cls, data, address = None,
bits = None,
separator = ' ',
width = 4):
"""
Dump a block of hexadecimal DWORDs from binary data.
@type data: str
@param data: Binary data.
@type address: str
@param address: Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type separator: str
@param separator:
Separator between the hexadecimal representation of each DWORD.
@type width: int
@param width:
(Optional) Maximum number of DWORDs to convert per text line.
@rtype: str
@return: Multiline output text.
"""
return cls.hexblock_cb(cls.hexa_dword, data,
address, bits, width * 4,
cb_kwargs = {'separator': separator})
@classmethod
def hexblock_qword(cls, data, address = None,
bits = None,
separator = ' ',
width = 2):
"""
Dump a block of hexadecimal QWORDs from binary data.
@type data: str
@param data: Binary data.
@type address: str
@param address: Memory address where the data was read from.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@type separator: str
@param separator:
Separator between the hexadecimal representation of each QWORD.
@type width: int
@param width:
(Optional) Maximum number of QWORDs to convert per text line.
@rtype: str
@return: Multiline output text.
"""
return cls.hexblock_cb(cls.hexa_qword, data,
address, bits, width * 8,
cb_kwargs = {'separator': separator})
#------------------------------------------------------------------------------
# TODO: implement an ANSI parser to simplify using colors
class Color (object):
"""
Colored console output.
"""
@staticmethod
def _get_text_attributes():
return win32.GetConsoleScreenBufferInfo().wAttributes
@staticmethod
def _set_text_attributes(wAttributes):
win32.SetConsoleTextAttribute(wAttributes = wAttributes)
#--------------------------------------------------------------------------
@classmethod
def can_use_colors(cls):
"""
Determine if we can use colors.
Colored output only works when the output is a real console, and fails
when redirected to a file or pipe. Call this method before issuing a
call to any other method of this class to make sure it's actually
possible to use colors.
@rtype: bool
@return: C{True} if it's possible to output text with color,
C{False} otherwise.
"""
try:
cls._get_text_attributes()
return True
except Exception:
return False
@classmethod
def reset(cls):
"Reset the colors to the default values."
cls._set_text_attributes(win32.FOREGROUND_GREY)
#--------------------------------------------------------------------------
#@classmethod
#def underscore(cls, on = True):
# wAttributes = cls._get_text_attributes()
# if on:
# wAttributes |= win32.COMMON_LVB_UNDERSCORE
# else:
# wAttributes &= ~win32.COMMON_LVB_UNDERSCORE
# cls._set_text_attributes(wAttributes)
#--------------------------------------------------------------------------
@classmethod
def default(cls):
"Make the current foreground color the default."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREY
wAttributes &= ~win32.FOREGROUND_INTENSITY
cls._set_text_attributes(wAttributes)
@classmethod
def light(cls):
"Make the current foreground color light."
wAttributes = cls._get_text_attributes()
wAttributes |= win32.FOREGROUND_INTENSITY
cls._set_text_attributes(wAttributes)
@classmethod
def dark(cls):
"Make the current foreground color dark."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_INTENSITY
cls._set_text_attributes(wAttributes)
@classmethod
def black(cls):
"Make the text foreground color black."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
#wAttributes |= win32.FOREGROUND_BLACK
cls._set_text_attributes(wAttributes)
@classmethod
def white(cls):
"Make the text foreground color white."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREY
cls._set_text_attributes(wAttributes)
@classmethod
def red(cls):
"Make the text foreground color red."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_RED
cls._set_text_attributes(wAttributes)
@classmethod
def green(cls):
"Make the text foreground color green."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREEN
cls._set_text_attributes(wAttributes)
@classmethod
def blue(cls):
"Make the text foreground color blue."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_BLUE
cls._set_text_attributes(wAttributes)
@classmethod
def cyan(cls):
"Make the text foreground color cyan."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_CYAN
cls._set_text_attributes(wAttributes)
@classmethod
def magenta(cls):
"Make the text foreground color magenta."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_MAGENTA
cls._set_text_attributes(wAttributes)
@classmethod
def yellow(cls):
"Make the text foreground color yellow."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_YELLOW
cls._set_text_attributes(wAttributes)
#--------------------------------------------------------------------------
@classmethod
def bk_default(cls):
"Make the current background color the default."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
#wAttributes |= win32.BACKGROUND_BLACK
wAttributes &= ~win32.BACKGROUND_INTENSITY
cls._set_text_attributes(wAttributes)
@classmethod
def bk_light(cls):
"Make the current background color light."
wAttributes = cls._get_text_attributes()
wAttributes |= win32.BACKGROUND_INTENSITY
cls._set_text_attributes(wAttributes)
@classmethod
def bk_dark(cls):
"Make the current background color dark."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_INTENSITY
cls._set_text_attributes(wAttributes)
@classmethod
def bk_black(cls):
"Make the text background color black."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
#wAttributes |= win32.BACKGROUND_BLACK
cls._set_text_attributes(wAttributes)
@classmethod
def bk_white(cls):
"Make the text background color white."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_GREY
cls._set_text_attributes(wAttributes)
@classmethod
def bk_red(cls):
"Make the text background color red."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_RED
cls._set_text_attributes(wAttributes)
@classmethod
def bk_green(cls):
"Make the text background color green."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_GREEN
cls._set_text_attributes(wAttributes)
@classmethod
def bk_blue(cls):
"Make the text background color blue."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_BLUE
cls._set_text_attributes(wAttributes)
@classmethod
def bk_cyan(cls):
"Make the text background color cyan."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_CYAN
cls._set_text_attributes(wAttributes)
@classmethod
def bk_magenta(cls):
"Make the text background color magenta."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_MAGENTA
cls._set_text_attributes(wAttributes)
@classmethod
def bk_yellow(cls):
"Make the text background color yellow."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.BACKGROUND_MASK
wAttributes |= win32.BACKGROUND_YELLOW
cls._set_text_attributes(wAttributes)
#------------------------------------------------------------------------------
# TODO: another class for ASCII boxes
class Table (object):
"""
Text based table. The number of columns and the width of each column
is automatically calculated.
"""
def __init__(self, sep = ' '):
"""
@type sep: str
@param sep: Separator between cells in each row.
"""
self.__cols = list()
self.__width = list()
self.__sep = sep
def addRow(self, *row):
"""
Add a row to the table. All items are converted to strings.
@type row: tuple
@keyword row: Each argument is a cell in the table.
"""
row = [ str(item) for item in row ]
len_row = [ len(item) for item in row ]
width = self.__width
len_old = len(width)
len_new = len(row)
known = min(len_old, len_new)
missing = len_new - len_old
if missing > 0:
width.extend( len_row[ -missing : ] )
elif missing < 0:
len_row.extend( [0] * (-missing) )
self.__width = [ max( width[i], len_row[i] ) for i in compat.xrange(len(len_row)) ]
self.__cols.append(row)
def justify(self, column, direction):
"""
Make the text in a column left or right justified.
@type column: int
@param column: Index of the column.
@type direction: int
@param direction:
C{-1} to justify left,
C{1} to justify right.
@raise IndexError: Bad column index.
@raise ValueError: Bad direction value.
"""
if direction == -1:
self.__width[column] = abs(self.__width[column])
elif direction == 1:
self.__width[column] = - abs(self.__width[column])
else:
raise ValueError("Bad direction value.")
def getWidth(self):
"""
Get the width of the text output for the table.
@rtype: int
@return: Width in characters for the text output,
including the newline character.
"""
width = 0
if self.__width:
width = sum( abs(x) for x in self.__width )
width = width + len(self.__width) * len(self.__sep) + 1
return width
def getOutput(self):
"""
Get the text output for the table.
@rtype: str
@return: Text output.
"""
return '%s\n' % '\n'.join( self.yieldOutput() )
def yieldOutput(self):
"""
Generate the text output for the table.
@rtype: generator of str
@return: Text output.
"""
width = self.__width
if width:
num_cols = len(width)
fmt = ['%%%ds' % -w for w in width]
if width[-1] > 0:
fmt[-1] = '%s'
fmt = self.__sep.join(fmt)
for row in self.__cols:
row.extend( [''] * (num_cols - len(row)) )
yield fmt % tuple(row)
def show(self):
"""
Print the text output for the table.
"""
print(self.getOutput())
#------------------------------------------------------------------------------
class CrashDump (StaticClass):
"""
Static functions for crash dumps.
@type reg_template: str
@cvar reg_template: Template for the L{dump_registers} method.
"""
# Templates for the dump_registers method.
reg_template = {
win32.ARCH_I386 : (
'eax=%(Eax).8x ebx=%(Ebx).8x ecx=%(Ecx).8x edx=%(Edx).8x esi=%(Esi).8x edi=%(Edi).8x\n'
'eip=%(Eip).8x esp=%(Esp).8x ebp=%(Ebp).8x %(efl_dump)s\n'
'cs=%(SegCs).4x ss=%(SegSs).4x ds=%(SegDs).4x es=%(SegEs).4x fs=%(SegFs).4x gs=%(SegGs).4x efl=%(EFlags).8x\n'
),
win32.ARCH_AMD64 : (
'rax=%(Rax).16x rbx=%(Rbx).16x rcx=%(Rcx).16x\n'
'rdx=%(Rdx).16x rsi=%(Rsi).16x rdi=%(Rdi).16x\n'
'rip=%(Rip).16x rsp=%(Rsp).16x rbp=%(Rbp).16x\n'
' r8=%(R8).16x r9=%(R9).16x r10=%(R10).16x\n'
'r11=%(R11).16x r12=%(R12).16x r13=%(R13).16x\n'
'r14=%(R14).16x r15=%(R15).16x\n'
'%(efl_dump)s\n'
'cs=%(SegCs).4x ss=%(SegSs).4x ds=%(SegDs).4x es=%(SegEs).4x fs=%(SegFs).4x gs=%(SegGs).4x efl=%(EFlags).8x\n'
),
}
@staticmethod
def dump_flags(efl):
"""
Dump the x86 processor flags.
The output mimics that of the WinDBG debugger.
Used by L{dump_registers}.
@type efl: int
@param efl: Value of the eFlags register.
@rtype: str
@return: Text suitable for logging.
"""
if efl is None:
return ''
efl_dump = 'iopl=%1d' % ((efl & 0x3000) >> 12)
if efl & 0x100000:
efl_dump += ' vip'
else:
efl_dump += ' '
if efl & 0x80000:
efl_dump += ' vif'
else:
efl_dump += ' '
# 0x20000 ???
if efl & 0x800:
efl_dump += ' ov' # Overflow
else:
efl_dump += ' no' # No overflow
if efl & 0x400:
efl_dump += ' dn' # Downwards
else:
efl_dump += ' up' # Upwards
if efl & 0x200:
efl_dump += ' ei' # Enable interrupts
else:
efl_dump += ' di' # Disable interrupts
# 0x100 trap flag
if efl & 0x80:
efl_dump += ' ng' # Negative
else:
efl_dump += ' pl' # Positive
if efl & 0x40:
efl_dump += ' zr' # Zero
else:
efl_dump += ' nz' # Nonzero
if efl & 0x10:
efl_dump += ' ac' # Auxiliary carry
else:
efl_dump += ' na' # No auxiliary carry
# 0x8 ???
if efl & 0x4:
efl_dump += ' pe' # Parity odd
else:
efl_dump += ' po' # Parity even
# 0x2 ???
if efl & 0x1:
efl_dump += ' cy' # Carry
else:
efl_dump += ' nc' # No carry
return efl_dump
@classmethod
def dump_registers(cls, registers, arch = None):
"""
Dump the x86/x64 processor register values.
The output mimics that of the WinDBG debugger.
@type registers: dict( str S{->} int )
@param registers: Dictionary mapping register names to their values.
@type arch: str
@param arch: Architecture of the machine whose registers were dumped.
Defaults to the current architecture.
Currently only the following architectures are supported:
- L{win32.ARCH_I386}
- L{win32.ARCH_AMD64}
@rtype: str
@return: Text suitable for logging.
"""
if registers is None:
return ''
if arch is None:
if 'Eax' in registers:
arch = win32.ARCH_I386
elif 'Rax' in registers:
arch = win32.ARCH_AMD64
else:
arch = 'Unknown'
if arch not in cls.reg_template:
msg = "Don't know how to dump the registers for architecture: %s"
raise NotImplementedError(msg % arch)
registers = registers.copy()
registers['efl_dump'] = cls.dump_flags( registers['EFlags'] )
return cls.reg_template[arch] % registers
@staticmethod
def dump_registers_peek(registers, data, separator = ' ', width = 16):
"""
Dump data pointed to by the given registers, if any.
@type registers: dict( str S{->} int )
@param registers: Dictionary mapping register names to their values.
This value is returned by L{Thread.get_context}.
@type data: dict( str S{->} str )
@param data: Dictionary mapping register names to the data they point to.
This value is returned by L{Thread.peek_pointers_in_registers}.
@rtype: str
@return: Text suitable for logging.
"""
if None in (registers, data):
return ''
names = compat.keys(data)
names.sort()
result = ''
for reg_name in names:
tag = reg_name.lower()
dumped = HexDump.hexline(data[reg_name], separator, width)
result += '%s -> %s\n' % (tag, dumped)
return result
@staticmethod
def dump_data_peek(data, base = 0,
separator = ' ',
width = 16,
bits = None):
"""
Dump data from pointers guessed within the given binary data.
@type data: str
@param data: Dictionary mapping offsets to the data they point to.
@type base: int
@param base: Base offset.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if data is None:
return ''
pointers = compat.keys(data)
pointers.sort()
result = ''
for offset in pointers:
dumped = HexDump.hexline(data[offset], separator, width)
address = HexDump.address(base + offset, bits)
result += '%s -> %s\n' % (address, dumped)
return result
@staticmethod
def dump_stack_peek(data, separator = ' ', width = 16, arch = None):
"""
Dump data from pointers guessed within the given stack dump.
@type data: str
@param data: Dictionary mapping stack offsets to the data they point to.
@type separator: str
@param separator:
Separator between the hexadecimal representation of each character.
@type width: int
@param width:
(Optional) Maximum number of characters to convert per text line.
This value is also used for padding.
@type arch: str
@param arch: Architecture of the machine whose registers were dumped.
Defaults to the current architecture.
@rtype: str
@return: Text suitable for logging.
"""
if data is None:
return ''
if arch is None:
arch = win32.arch
pointers = compat.keys(data)
pointers.sort()
result = ''
if pointers:
if arch == win32.ARCH_I386:
spreg = 'esp'
elif arch == win32.ARCH_AMD64:
spreg = 'rsp'
else:
spreg = 'STACK' # just a generic tag
tag_fmt = '[%s+0x%%.%dx]' % (spreg, len( '%x' % pointers[-1] ) )
for offset in pointers:
dumped = HexDump.hexline(data[offset], separator, width)
tag = tag_fmt % offset
result += '%s -> %s\n' % (tag, dumped)
return result
@staticmethod
def dump_stack_trace(stack_trace, bits = None):
"""
Dump a stack trace, as returned by L{Thread.get_stack_trace} with the
C{bUseLabels} parameter set to C{False}.
@type stack_trace: list( int, int, str )
@param stack_trace: Stack trace as a list of tuples of
( return address, frame pointer, module filename )
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not stack_trace:
return ''
table = Table()
table.addRow('Frame', 'Origin', 'Module')
for (fp, ra, mod) in stack_trace:
fp_d = HexDump.address(fp, bits)
ra_d = HexDump.address(ra, bits)
table.addRow(fp_d, ra_d, mod)
return table.getOutput()
@staticmethod
def dump_stack_trace_with_labels(stack_trace, bits = None):
"""
Dump a stack trace,
as returned by L{Thread.get_stack_trace_with_labels}.
@type stack_trace: list( int, int, str )
@param stack_trace: Stack trace as a list of tuples of
( return address, frame pointer, module filename )
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not stack_trace:
return ''
table = Table()
table.addRow('Frame', 'Origin')
for (fp, label) in stack_trace:
table.addRow( HexDump.address(fp, bits), label )
return table.getOutput()
# TODO
# + Instead of a star when EIP points to, it would be better to show
# any register value (or other values like the exception address) that
# points to a location in the dissassembled code.
# + It'd be very useful to show some labels here.
# + It'd be very useful to show register contents for code at EIP
@staticmethod
def dump_code(disassembly, pc = None,
bLowercase = True,
bits = None):
"""
Dump a disassembly. Optionally mark where the program counter is.
@type disassembly: list of tuple( int, int, str, str )
@param disassembly: Disassembly dump as returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type pc: int
@param pc: (Optional) Program counter.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not disassembly:
return ''
table = Table(sep = ' | ')
for (addr, size, code, dump) in disassembly:
if bLowercase:
code = code.lower()
if addr == pc:
addr = ' * %s' % HexDump.address(addr, bits)
else:
addr = ' %s' % HexDump.address(addr, bits)
table.addRow(addr, dump, code)
table.justify(1, 1)
return table.getOutput()
@staticmethod
def dump_code_line(disassembly_line, bShowAddress = True,
bShowDump = True,
bLowercase = True,
dwDumpWidth = None,
dwCodeWidth = None,
bits = None):
"""
Dump a single line of code. To dump a block of code use L{dump_code}.
@type disassembly_line: tuple( int, int, str, str )
@param disassembly_line: Single item of the list returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type bShowAddress: bool
@param bShowAddress: (Optional) If C{True} show the memory address.
@type bShowDump: bool
@param bShowDump: (Optional) If C{True} show the hexadecimal dump.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type dwDumpWidth: int or None
@param dwDumpWidth: (Optional) Width in characters of the hex dump.
@type dwCodeWidth: int or None
@param dwCodeWidth: (Optional) Width in characters of the code.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if bits is None:
address_size = HexDump.address_size
else:
address_size = bits / 4
(addr, size, code, dump) = disassembly_line
dump = dump.replace(' ', '')
result = list()
fmt = ''
if bShowAddress:
result.append( HexDump.address(addr, bits) )
fmt += '%%%ds:' % address_size
if bShowDump:
result.append(dump)
if dwDumpWidth:
fmt += ' %%-%ds' % dwDumpWidth
else:
fmt += ' %s'
if bLowercase:
code = code.lower()
result.append(code)
if dwCodeWidth:
fmt += ' %%-%ds' % dwCodeWidth
else:
fmt += ' %s'
return fmt % tuple(result)
@staticmethod
def dump_memory_map(memoryMap, mappedFilenames = None, bits = None):
"""
Dump the memory map of a process. Optionally show the filenames for
memory mapped files as well.
@type memoryMap: list( L{win32.MemoryBasicInformation} )
@param memoryMap: Memory map returned by L{Process.get_memory_map}.
@type mappedFilenames: dict( int S{->} str )
@param mappedFilenames: (Optional) Memory mapped filenames
returned by L{Process.get_mapped_filenames}.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if not memoryMap:
return ''
table = Table()
if mappedFilenames:
table.addRow("Address", "Size", "State", "Access", "Type", "File")
else:
table.addRow("Address", "Size", "State", "Access", "Type")
# For each memory block in the map...
for mbi in memoryMap:
# Address and size of memory block.
BaseAddress = HexDump.address(mbi.BaseAddress, bits)
RegionSize = HexDump.address(mbi.RegionSize, bits)
# State (free or allocated).
mbiState = mbi.State
if mbiState == win32.MEM_RESERVE:
State = "Reserved"
elif mbiState == win32.MEM_COMMIT:
State = "Commited"
elif mbiState == win32.MEM_FREE:
State = "Free"
else:
State = "Unknown"
# Page protection bits (R/W/X/G).
if mbiState != win32.MEM_COMMIT:
Protect = ""
else:
mbiProtect = mbi.Protect
if mbiProtect & win32.PAGE_NOACCESS:
Protect = "--- "
elif mbiProtect & win32.PAGE_READONLY:
Protect = "R-- "
elif mbiProtect & win32.PAGE_READWRITE:
Protect = "RW- "
elif mbiProtect & win32.PAGE_WRITECOPY:
Protect = "RC- "
elif mbiProtect & win32.PAGE_EXECUTE:
Protect = "--X "
elif mbiProtect & win32.PAGE_EXECUTE_READ:
Protect = "R-X "
elif mbiProtect & win32.PAGE_EXECUTE_READWRITE:
Protect = "RWX "
elif mbiProtect & win32.PAGE_EXECUTE_WRITECOPY:
Protect = "RCX "
else:
Protect = "??? "
if mbiProtect & win32.PAGE_GUARD:
Protect += "G"
else:
Protect += "-"
if mbiProtect & win32.PAGE_NOCACHE:
Protect += "N"
else:
Protect += "-"
if mbiProtect & win32.PAGE_WRITECOMBINE:
Protect += "W"
else:
Protect += "-"
# Type (file mapping, executable image, or private memory).
mbiType = mbi.Type
if mbiType == win32.MEM_IMAGE:
Type = "Image"
elif mbiType == win32.MEM_MAPPED:
Type = "Mapped"
elif mbiType == win32.MEM_PRIVATE:
Type = "Private"
elif mbiType == 0:
Type = ""
else:
Type = "Unknown"
# Output a row in the table.
if mappedFilenames:
FileName = mappedFilenames.get(mbi.BaseAddress, '')
table.addRow( BaseAddress, RegionSize, State, Protect, Type, FileName )
else:
table.addRow( BaseAddress, RegionSize, State, Protect, Type )
# Return the table output.
return table.getOutput()
#------------------------------------------------------------------------------
class DebugLog (StaticClass):
'Static functions for debug logging.'
@staticmethod
def log_text(text):
"""
Log lines of text, inserting a timestamp.
@type text: str
@param text: Text to log.
@rtype: str
@return: Log line.
"""
if text.endswith('\n'):
text = text[:-len('\n')]
#text = text.replace('\n', '\n\t\t') # text CSV
ltime = time.strftime("%X")
msecs = (time.time() % 1) * 1000
return '[%s.%04d] %s' % (ltime, msecs, text)
#return '[%s.%04d]\t%s' % (ltime, msecs, text) # text CSV
@classmethod
def log_event(cls, event, text = None):
"""
Log lines of text associated with a debug event.
@type event: L{Event}
@param event: Event object.
@type text: str
@param text: (Optional) Text to log. If no text is provided the default
is to show a description of the event itself.
@rtype: str
@return: Log line.
"""
if not text:
if event.get_event_code() == win32.EXCEPTION_DEBUG_EVENT:
what = event.get_exception_description()
if event.is_first_chance():
what = '%s (first chance)' % what
else:
what = '%s (second chance)' % what
try:
address = event.get_fault_address()
except NotImplementedError:
address = event.get_exception_address()
else:
what = event.get_event_name()
address = event.get_thread().get_pc()
process = event.get_process()
label = process.get_label_at_address(address)
address = HexDump.address(address, process.get_bits())
if label:
where = '%s (%s)' % (address, label)
else:
where = address
text = '%s at %s' % (what, where)
text = 'pid %d tid %d: %s' % (event.get_pid(), event.get_tid(), text)
#text = 'pid %d tid %d:\t%s' % (event.get_pid(), event.get_tid(), text) # text CSV
return cls.log_text(text)
#------------------------------------------------------------------------------
class Logger(object):
"""
Logs text to standard output and/or a text file.
@type logfile: str or None
@ivar logfile: Append messages to this text file.
@type verbose: bool
@ivar verbose: C{True} to print messages to standard output.
@type fd: file
@ivar fd: File object where log messages are printed to.
C{None} if no log file is used.
"""
def __init__(self, logfile = None, verbose = True):
"""
@type logfile: str or None
@param logfile: Append messages to this text file.
@type verbose: bool
@param verbose: C{True} to print messages to standard output.
"""
self.verbose = verbose
self.logfile = logfile
if self.logfile:
self.fd = open(self.logfile, 'a+')
def __logfile_error(self, e):
"""
Shows an error message to standard error
if the log file can't be written to.
Used internally.
@type e: Exception
@param e: Exception raised when trying to write to the log file.
"""
from sys import stderr
msg = "Warning, error writing log file %s: %s\n"
msg = msg % (self.logfile, str(e))
stderr.write(DebugLog.log_text(msg))
self.logfile = None
self.fd = None
def __do_log(self, text):
"""
Writes the given text verbatim into the log file (if any)
and/or standard input (if the verbose flag is turned on).
Used internally.
@type text: str
@param text: Text to print.
"""
if isinstance(text, compat.unicode):
text = text.encode('cp1252')
if self.verbose:
print(text)
if self.logfile:
try:
self.fd.writelines('%s\n' % text)
except IOError:
e = sys.exc_info()[1]
self.__logfile_error(e)
def log_text(self, text):
"""
Log lines of text, inserting a timestamp.
@type text: str
@param text: Text to log.
"""
self.__do_log( DebugLog.log_text(text) )
def log_event(self, event, text = None):
"""
Log lines of text associated with a debug event.
@type event: L{Event}
@param event: Event object.
@type text: str
@param text: (Optional) Text to log. If no text is provided the default
is to show a description of the event itself.
"""
self.__do_log( DebugLog.log_event(event, text) )
def log_exc(self):
"""
Log lines of text associated with the last Python exception.
"""
self.__do_log( 'Exception raised: %s' % traceback.format_exc() )
def is_enabled(self):
"""
Determines if the logger will actually print anything when the log_*
methods are called.
This may save some processing if the log text requires a lengthy
calculation to prepare. If no log file is set and stdout logging
is disabled, there's no point in preparing a log text that won't
be shown to anyone.
@rtype: bool
@return: C{True} if a log file was set and/or standard output logging
is enabled, or C{False} otherwise.
"""
return self.verbose or self.logfile
| apache-2.0 |
ndp-systemes/odoo-addons | fix_export_rows/fix_export_rows.py | 1 | 6461 | # -*- coding: utf8 -*-
#
# Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp import models, api
from openerp.models import BaseModel
class BaseModelExtend(models.BaseModel):
_name = 'basemodel.extend'
def _register_hook(self, cr):
@api.multi
def new_export_rows(self, fields):
""" Export fields of the records in ``self``.
:param fields: list of lists of fields to traverse
:return: list of lists of corresponding values
"""
lines = []
for record in self:
# main line of record, initially empty
current = [''] * len(fields)
lines.append(current)
# list of primary fields followed by secondary field(s)
primary_done = []
# process column by column
for i, path in enumerate(fields):
if not path:
continue
name = path[0]
if name in primary_done:
continue
if name == '.id':
current[i] = str(record.id)
elif name == 'id':
current[i] = record.export_xml_id()
else:
field = record._fields[name]
value = record[name]
# this part could be simpler, but it has to be done this way
# in order to reproduce the former behavior
if not isinstance(value, BaseModel):
current[i] = field.convert_to_export(value, self.env)
else:
primary_done.append(name)
# This is a special case, its strange behavior is intended!
if field.type == 'many2many' and len(path) > 1 and path[1] == 'id':
xml_ids = [r.export_xml_id() for r in value]
current[i] = ','.join(xml_ids) or False
continue
# recursively export the fields that follow name
fields2 = [(p[1:] if p and p[0] == name else []) for p in fields]
lines2 = value.new_export_rows(fields2)
if lines2:
# merge first line with record's main line
for j, val in enumerate(lines2[0]):
if val or isinstance(val, bool) or isinstance(val, int) or isinstance(val, float):
current[j] = val
# check value of current field
if not current[i] and not isinstance(current[i], bool) and \
not isinstance(current[i], int) and not isinstance(current[i], float):
# assign xml_ids, and forget about remaining lines
xml_ids = [item[1] for item in value.name_get()]
current[i] = ','.join(xml_ids)
else:
# append the other lines at the end
lines += lines2[1:]
else:
current[i] = False
return lines
@api.multi
def new_export_data(self, fields_to_export, raw_data=False):
""" Export fields for selected objects
:param fields_to_export: list of fields
:param raw_data: True to return value in native Python type
:rtype: dictionary with a *datas* matrix
This method is used when exporting data via client menu
"""
fields_to_export = map(models.fix_import_export_id_paths, fields_to_export)
if raw_data:
self = self.with_context(export_raw_data=True)
return {'datas': self.new_export_rows(fields_to_export)}
def export_xml_id(self):
""" Return a valid xml_id for the record ``self``. """
if not self._is_an_ordinary_table():
raise Exception(
"You can not export the column ID of model %s, because the "
"table %s is not an ordinary table."
% (self._name, self._table))
ir_model_data = self.sudo().env['ir.model.data']
data = ir_model_data.search([('model', '=', self._name), ('res_id', '=', self.id)])
if data:
if data[0].module:
return '%s.%s' % (data[0].module, data[0].name)
else:
return data[0].name
else:
postfix = 0
name = '%s_%s' % (self._table, self.id)
while ir_model_data.search([('module', '=', '__export__'), ('name', '=', name)]):
postfix += 1
name = '%s_%s_%s' % (self._table, self.id, postfix)
ir_model_data.create({
'model': self._name,
'res_id': self.id,
'module': '__export__',
'name': name,
})
return '__export__.' + name
BaseModel.export_data = new_export_data
BaseModel.new_export_rows = new_export_rows
BaseModel.export_xml_id = export_xml_id
return super(BaseModelExtend, self)._register_hook(cr)
| agpl-3.0 |
rcarmo/yaki-gae | lib/pygments/formatters/other.py | 363 | 3811 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
| mit |
programulya/three.js | utils/exporters/blender/modules/msgpack/__init__.py | 659 | 1385 | # coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
if os.environ.get('MSGPACK_PUREPYTHON'):
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
try:
from msgpack._packer import Packer
from msgpack._unpacker import unpack, unpackb, Unpacker
except ImportError:
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
| mit |
tbinjiayou/Odoo | openerp/tools/pdf_utils.py | 456 | 3659 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
fields names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
keys = extract_keys_from_pdf(infile)
fields = []
for key in keys:
fields.append( (key, key, '') )
fill_pdf(infile, outfile, fields)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zero-ui/miniblink49 | v8_7_5/testing/gmock/scripts/generator/cpp/keywords.py | 1157 | 2004 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = '[email protected] (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
return token in ALL
def IsBuiltinType(token):
if token in ('virtual', 'inline'):
# These only apply to methods, they can't be types by themselves.
return False
return token in TYPES or token in TYPE_MODIFIERS
| gpl-3.0 |
andresriancho/qotd | qotd/qotd.py | 1 | 6626 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 Yuen Ho Wong
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import re
import os
import random
import copy
import time
from BaseHTTPServer import BaseHTTPRequestHandler
from datetime import datetime, date
from urllib2 import urlopen, URLError
from cPickle import dump, load, HIGHEST_PROTOCOL
from xml.sax import make_parser, SAXException
from xml.sax.handler import ContentHandler, property_xml_string
DOT_FILE_PATH = os.path.join(os.path.expanduser("~"), ".qotd")
class Quote(object):
def __init__(self, author=None, content=None, pubDate=None, printed=False):
self.author = author
self.content = content
self.pubDate = pubDate
self.printed = printed
def __cmp__(self, other):
if self.pubDate < other.pubDate:
return -1
elif self.pubDate == other.pubDate:
return 0
elif self.pubDate > other.pubDate:
return 1
def __str__(self):
# Replace the Unicode replacement character often found in the RSS feed
# with a space and then encode the Unicode string to a UTF8 byte string
# as per the __str__ protocol demands.
return ((self.content or u'') + u'\n-- ' + (self.author or u'')).replace(u'\ufffd', u' ').encode('utf8')
class QOTDFeedHandler(ContentHandler):
def __init__(self):
self.quotes = []
self.quote = None
self.in_item = False
self.in_title = False
self.in_description = False
self.in_pubDate = False
self.lexing_tag = False
self.in_tag = False
# Hack to recover from a few unrecognized
# unicode chars bombing out the whole script
self.has_valid_content = False
def startElement(self, name, attrs):
if name == 'item':
self.in_item = True
self.quote = Quote()
elif name == 'title' and self.in_item:
self.in_title = True
elif name == 'description' and self.in_item:
self.in_description = True
elif name == 'pubDate' and self.in_item:
self.in_pubDate = True
def characters(self, content):
if self.in_item:
if self.in_title:
self.quote.author = content
elif self.in_pubDate:
st_time = time.strptime(content, u"%a, %d %b %Y %H:%M:%S %Z")
self.quote.pubDate = date.fromtimestamp(time.mktime(st_time))
elif self.in_description:
if content == '<':
self.lexing_tag = True
elif content == '>':
self.lexing_tag = False
elif content.isspace():
return
elif content.startswith('/'):
if self.lexing_tag:
self.in_tag = False
else:
self.in_tag = True
else:
if self.in_tag:
return
elif not self.lexing_tag and not self.in_tag:
l = re.findall(ur'".+"', content)
if l:
self.quote.content = l[0]
self.has_valid_content = True
def endElement(self, name):
if name == 'item':
self.in_item = False
if self.has_valid_content:
self.quotes.append(self.quote)
self.has_valid_content = False
elif name == 'title' and self.in_item:
self.in_title = False
elif name == 'description' and self.in_item:
self.in_description = False
elif name == 'pubDate' and self.in_item:
self.in_pubDate = False
def update_quotes_cache():
quotes = None
resp = urlopen("http://www.quotationspage.com/data/qotd.rss")
parser = make_parser()
handler = QOTDFeedHandler()
parser.setContentHandler(handler)
try:
parser.parse(resp)
except SAXException, e:
print 'Error parsing qotd.rss', e
pass
else:
quotes = copy.deepcopy(handler.quotes)
resp = urlopen('http://www.quotationspage.com/data/mqotd.rss')
parser = make_parser()
handler = QOTDFeedHandler()
parser.setContentHandler(handler)
try:
parser.parse(resp)
except SAXException, e:
print 'Error parsng mqotd.rss', e
pass
quotes += handler.quotes
# Cache the quotes for a day
with open(DOT_FILE_PATH, 'w') as dotfile:
dump(quotes, dotfile, HIGHEST_PROTOCOL)
return quotes
def main():
dotfilestatinfo = None
try:
dotfilestatinfo = os.stat(DOT_FILE_PATH)
except (IOError, OSError):
try:
print random.choice(update_quotes_cache())
return 0
except URLError, e:
if hasattr(e, 'code'):
print 'QuoteOfTheDay: Error %d: %s' % (e.code, BaseHTTPRequestHandler.responses[e.code][1])
elif hasattr(e, 'reason'):
print 'QuoteOfTheDay: IO or URL Error: ' + str(e.reason)
return 1
lastmod = datetime.fromtimestamp(dotfilestatinfo.st_mtime)
if (datetime.now() - lastmod).days < 1:
with open(DOT_FILE_PATH) as dotfile:
print random.choice(load(dotfile))
else:
try:
print random.choice(update_quotes_cache())
except URLError, e:
if hasattr(e, 'code'):
print 'QuoteOfTheDay: Error %d: %s' % (e.code, BaseHTTPRequestHandler.responses[e.code][1])
elif hasattr(e, 'reason'):
print 'QuoteOfTheDay: IO or URL Error: ' + str(e.reason)
return 1
return 0
| gpl-3.0 |
ralphwort/chef-repo | build/python-neutronclient/pbr-0.8.2-py2.7.egg/pbr/hooks/files.py | 44 | 3600 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from pbr import find_package
from pbr.hooks import base
def get_manpath():
manpath = 'share/man'
if os.path.exists(os.path.join(sys.prefix, 'man')):
# This works around a bug with install where it expects every node
# in the relative data directory to be an actual directory, since at
# least Debian derivatives (and probably other platforms as well)
# like to symlink Unixish /usr/local/man to /usr/local/share/man.
manpath = 'man'
return manpath
def get_man_section(section):
return os.path.join(get_manpath(), 'man%s' % section)
class FilesConfig(base.BaseConfig):
section = 'files'
def __init__(self, config, name):
super(FilesConfig, self).__init__(config)
self.name = name
self.data_files = self.config.get('data_files', '')
def save(self):
self.config['data_files'] = self.data_files
super(FilesConfig, self).save()
def expand_globs(self):
finished = []
for line in self.data_files.split("\n"):
if line.rstrip().endswith('*') and '=' in line:
(target, source_glob) = line.split('=')
source_prefix = source_glob.strip()[:-1]
target = target.strip()
if not target.endswith(os.path.sep):
target += os.path.sep
for (dirpath, dirnames, fnames) in os.walk(source_prefix):
finished.append(
"%s = " % dirpath.replace(source_prefix, target))
finished.extend(
[" %s" % os.path.join(dirpath, f) for f in fnames])
else:
finished.append(line)
self.data_files = "\n".join(finished)
def add_man_path(self, man_path):
self.data_files = "%s\n%s =" % (self.data_files, man_path)
def add_man_page(self, man_page):
self.data_files = "%s\n %s" % (self.data_files, man_page)
def get_man_sections(self):
man_sections = dict()
manpages = self.pbr_config['manpages']
for manpage in manpages.split():
section_number = manpage.strip()[-1]
section = man_sections.get(section_number, list())
section.append(manpage.strip())
man_sections[section_number] = section
return man_sections
def hook(self):
package = self.config.get('packages', self.name).strip()
if os.path.isdir(package):
self.config['packages'] = find_package.smart_find_packages(package)
self.expand_globs()
if 'manpages' in self.pbr_config:
man_sections = self.get_man_sections()
for (section, pages) in man_sections.items():
manpath = get_man_section(section)
self.add_man_path(manpath)
for page in pages:
self.add_man_page(page)
| apache-2.0 |
Semi-global/edx-platform | lms/djangoapps/lms_xblock/migrations/0001_initial.py | 110 | 4883 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'XBlockAsidesConfig'
db.create_table('lms_xblock_xblockasidesconfig', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('disabled_blocks', self.gf('django.db.models.fields.TextField')(default='about course_info static_tab')),
))
db.send_create_signal('lms_xblock', ['XBlockAsidesConfig'])
def backwards(self, orm):
# Deleting model 'XBlockAsidesConfig'
db.delete_table('lms_xblock_xblockasidesconfig')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lms_xblock.xblockasidesconfig': {
'Meta': {'object_name': 'XBlockAsidesConfig'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'disabled_blocks': ('django.db.models.fields.TextField', [], {'default': "'about course_info static_tab'"}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['lms_xblock'] | agpl-3.0 |
dlazz/ansible | lib/ansible/modules/network/f5/bigip_policy.py | 14 | 36820 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_policy
short_description: Manage general policy configuration on a BIG-IP
description:
- Manages general policy configuration on a BIG-IP. This module is best
used in conjunction with the C(bigip_policy_rule) module. This module
can handle general configuration like setting the draft state of the policy,
the description, and things unrelated to the policy rules themselves.
It is also the first module that should be used when creating rules as
the C(bigip_policy_rule) module requires a policy parameter.
version_added: 2.5
options:
description:
description:
- The description to attach to the policy.
- This parameter is only supported on versions of BIG-IP >= 12.1.0. On earlier
versions it will simply be ignored.
name:
description:
- The name of the policy to create.
required: True
state:
description:
- When C(state) is C(present), ensures that the policy exists and is
published. When C(state) is C(absent), ensures that the policy is removed,
even if it is currently drafted.
- When C(state) is C(draft), ensures that the policy exists and is drafted.
When modifying rules, it is required that policies first be in a draft.
- Drafting is only supported on versions of BIG-IP >= 12.1.0. On versions
prior to that, specifying a C(state) of C(draft) will raise an error.
choices:
- present
- absent
- draft
default: present
strategy:
description:
- Specifies the method to determine which actions get executed in the
case where there are multiple rules that match. When creating new
policies, the default is C(first).
- This module does not allow you to specify the C(best) strategy to use.
It will choose the system default (C(/Common/best-match)) for you instead.
choices:
- first
- all
- best
rules:
description:
- Specifies a list of rules that you want associated with this policy.
The order of this list is the order they will be evaluated by BIG-IP.
If the specified rules do not exist (for example when creating a new
policy) then they will be created.
- The C(conditions) for a default rule are C(all).
- The C(actions) for a default rule are C(ignore).
- The C(bigip_policy_rule) module can be used to create and edit existing
and new rules.
partition:
description:
- Device partition to manage resources on.
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create policy which is immediately published
bigip_policy:
name: Policy-Foo
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add a rule to the new policy - Immediately published
bigip_policy_rule:
policy: Policy-Foo
name: ABC
conditions:
- type: http_uri
path_starts_with:
- /ABC
- foo
- bar
path_ends_with:
- baz
actions:
- forward: yes
select: yes
pool: pool-svrs
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add multiple rules to the new policy - Added in the order they are specified
bigip_policy_rule:
policy: Policy-Foo
name: "{{ item.name }}"
conditions: "{{ item.conditions }}"
actions: "{{ item.actions }}"
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
loop:
- name: rule1
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_starts_with: /euro
- name: HomePage
actions:
- type: forward
pool: pool-svrs
conditions:
- type: http_uri
path_starts_with: /HomePage/
- name: Create policy specify default rules - Immediately published
bigip_policy:
name: Policy-Bar
state: present
rules:
- rule1
- rule2
- rule3
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create policy specify default rules - Left in a draft
bigip_policy:
name: Policy-Baz
state: draft
rules:
- rule1
- rule2
- rule3
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
strategy:
description: The new strategy set on the policy.
returned: changed and success
type: int
sample: first-match
description:
description:
- The new description of the policy.
- This value is only returned for BIG-IP devices >= 12.1.0.
returned: changed and success
type: str
sample: This is my description
rules:
description: List of the rules, and their order, applied to the policy.
returned: changed and success
type: list
sample: ['/Common/rule1', '/Common/rule2']
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import tmos_version
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import tmos_version
class Parameters(AnsibleF5Parameters):
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
@property
def strategy(self):
if self._values['strategy'] is None:
return None
# Look for 'first' from Ansible or REST
elif self._values['strategy'] == 'first':
return self._get_builtin_strategy('first')
elif 'first-match' in self._values['strategy']:
return str(self._values['strategy'])
# Look for 'all' from Ansible or REST
elif self._values['strategy'] == 'all':
return self._get_builtin_strategy('all')
elif 'all-match' in self._values['strategy']:
return str(self._values['strategy'])
else:
# Look for 'best' from Ansible or REST
if self._values['strategy'] == 'best':
return self._get_builtin_strategy('best')
elif 'best-match' in self._values['strategy']:
return str(self._values['strategy'])
else:
# These are custom strategies. The strategy may include the
# partition, but if it does not, then we add the partition
# that is provided to the module.
return self._get_custom_strategy_name()
def _get_builtin_strategy(self, strategy):
return '/Common/{0}-match'.format(strategy)
def _get_custom_strategy_name(self):
strategy = self._values['strategy']
if re.match(r'(\/[a-zA-Z_0-9.-]+){2}', strategy):
return strategy
elif re.match(r'[a-zA-Z_0-9.-]+', strategy):
return '/{0}/{1}'.format(self.partition, strategy)
else:
raise F5ModuleError(
"The provided strategy name is invalid!"
)
@property
def rules(self):
if self._values['rules'] is None:
return None
# In case rule values are unicode (as they may be coming from the API
result = [str(x) for x in self._values['rules']]
return result
class SimpleParameters(Parameters):
api_attributes = [
'strategy',
]
updatables = [
'strategy',
'rules',
]
returnables = [
'strategy',
'rules',
]
class ComplexParameters(Parameters):
api_attributes = [
'strategy',
'description',
]
updatables = [
'strategy',
'description',
'rules',
]
returnables = [
'strategy',
'description',
'rules',
]
class SimpleChanges(SimpleParameters):
api_attributes = [
'strategy'
]
updatables = [
'strategy', 'rules'
]
returnables = [
'strategy', 'rules'
]
class ComplexChanges(ComplexParameters):
api_attributes = [
'strategy', 'description'
]
updatables = [
'strategy', 'description', 'rules'
]
returnables = [
'strategy', 'description', 'rules'
]
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = None
self.want = Parameters(params=self.module.params)
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__deprecated', [])
if self.have:
warnings += self.have._values.get('__deprecated', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _announce_warnings(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warning', [])
if self.have:
warnings += self.have._values.get('__warning', [])
for warning in warnings:
self.module.warn(warning['msg'])
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def _validate_creation_parameters(self):
if self.want.strategy is None:
self.want.update(dict(strategy='first'))
def _get_rule_names(self, rules):
if 'items' in rules:
rules['items'].sort(key=lambda x: x['ordinal'])
result = [x['name'] for x in rules['items']]
return result
else:
return []
def _read_rule_from_device(self, rule_name, draft=False):
if draft:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
rule_name
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['ordinal']
def _create_rule_on_device(self, rule_name, idx, draft=False):
params = dict(name=rule_name, ordinal=idx)
if draft:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def _modify_rule_on_device(self, rule_name, idx, draft=False):
params = dict(ordinal=idx)
if draft:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
rule_name
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
self.want.name
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == [400, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def _rule_exists_on_device(self, rule_name, draft=False):
if draft:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
rule_name
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def _remove_rule_on_device(self, rule_name, draft=False):
if draft:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
rule_name
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
self.want.name
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def _upsert_policy_rules_on_device(self, draft=False):
rules = self.changes.rules
if rules is None:
rules = []
for idx, rule in enumerate(rules):
if self._rule_exists_on_device(rule, draft):
ordinal = self._read_rule_from_device(rule, draft)
if int(ordinal) != idx:
self._modify_rule_on_device(rule, idx, draft)
else:
self._create_rule_on_device(rule, idx, draft)
self._remove_rule_difference(rules, draft)
def _remove_rule_difference(self, rules, draft=False):
if not rules or not self.have.rules:
return
have_rules = set(self.have.rules)
want_rules = set(rules)
removable = have_rules.difference(want_rules)
for remove in removable:
self._remove_rule_on_device(remove, draft)
class SimpleManager(BaseManager):
def __init__(self, *args, **kwargs):
super(SimpleManager, self).__init__(**kwargs)
self.want = SimpleParameters(params=self.module.params)
self.have = SimpleParameters()
self.changes = SimpleChanges()
def _set_changed_options(self):
changed = {}
for key in SimpleParameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = SimpleChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = SimpleParameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = SimpleChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == 'draft':
raise F5ModuleError(
"The 'draft' status is not available on BIG-IP versions < 12.1.0"
)
if state == 'present':
changed = self.present()
elif state == 'absent':
changed = self.absent()
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
self._announce_warnings()
return result
def create(self):
self._validate_creation_parameters()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the policy")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
query = "?expandSubcollections=true"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
rules = self._get_rule_names(response['rulesReference'])
result = SimpleParameters(params=response)
result.update(dict(rules=rules))
return result
def update_on_device(self):
params = self.changes.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
self._upsert_policy_rules_on_device()
def create_on_device(self):
params = self.want.api_params()
payload = dict(
name=self.want.name,
partition=self.want.partition,
**params
)
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=payload)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
self._upsert_policy_rules_on_device()
return True
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ComplexManager(BaseManager):
def __init__(self, *args, **kwargs):
super(ComplexManager, self).__init__(**kwargs)
self.want = ComplexParameters(params=self.module.params)
self.have = ComplexParameters()
self.changes = ComplexChanges()
def _set_changed_options(self):
changed = {}
for key in ComplexParameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = ComplexChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = ComplexParameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = ComplexChanges(params=changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state in ["present", "draft"]:
changed = self.present()
elif state == "absent":
changed = self.absent()
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def should_update(self):
result = self._update_changed_options()
drafted = self.draft_status_changed()
if any(x is True for x in [result, drafted]):
return True
return False
def draft_status_changed(self):
if self.draft_exists() and self.want.state == 'draft':
drafted = False
elif not self.draft_exists() and self.want.state == 'present':
drafted = False
else:
drafted = True
return drafted
def present(self):
if self.draft_exists() or self.policy_exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.draft_exists() or self.policy_exists():
changed = self.remove()
return changed
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.draft_exists() or self.policy_exists():
raise F5ModuleError("Failed to delete the policy")
return True
def create(self):
self._validate_creation_parameters()
self._set_changed_options()
if self.module.check_mode:
return True
if not self.draft_exists():
self._create_new_policy_draft()
# Because we always need to modify drafts, "creating on the device"
# is actually identical to just updating.
self.update_on_device()
if self.want.state == 'draft':
return True
else:
return self.publish()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
if not self.draft_exists():
self._create_existing_policy_draft()
if self._update_changed_options():
self.update_on_device()
if self.want.state == 'draft':
return True
else:
return self.publish()
def draft_exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def policy_exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def _create_existing_policy_draft(self):
params = dict(createDraft=True)
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def _create_new_policy_draft(self):
params = self.want.api_params()
payload = dict(
name=self.want.name,
partition=self.want.partition,
subPath='Drafts',
**params
)
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=payload)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
self._upsert_policy_rules_on_device(draft=True)
def read_current_from_device(self):
if self.draft_exists():
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
)
else:
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
query = "?expandSubcollections=true"
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
rules = self._get_rule_names(response['rulesReference'])
result = ComplexParameters(params=response)
result.update(dict(rules=rules))
return result
def publish(self):
params = dict(
name=fq_name(self.want.partition,
self.want.name,
sub_path='Drafts'
),
command="publish"
)
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def remove_policy_draft_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name, sub_path='Drafts'),
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def remove_policy_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/policy/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def remove_from_device(self):
if self.draft_exists():
self.remove_policy_draft_from_device()
if self.policy_exists():
self.remove_policy_from_device()
return True
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def rules(self):
if self.want.rules != self.have.rules:
return self.want.rules
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if self.version_is_less_than_12():
manager = self.get_manager('simple')
else:
manager = self.get_manager('complex')
return manager.exec_module()
def get_manager(self, type):
if type == 'simple':
return SimpleManager(**self.kwargs)
elif type == 'complex':
return ComplexManager(**self.kwargs)
def version_is_less_than_12(self):
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('12.1.0'):
return True
else:
return False
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True
),
description=dict(),
rules=dict(type='list'),
strategy=dict(
choices=['first', 'all', 'best']
),
state=dict(
default='present',
choices=['absent', 'present', 'draft']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
stevelle/openstack-ansible | playbooks/roles/repo_server/files/openstack-wheel-builder.py | 2 | 12732 | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2015, Kevin Carter <[email protected]>
import os
import traceback
from distutils import version
import yaml
from cloudlib import arguments
from cloudlib import indicator
from cloudlib import shell
VERSION_DESCRIPTORS = ['>=', '<=', '==', '!=', '<', '>']
REQUIREMENTS_FILE_TYPES = [
'requirements.txt',
'global-requirements.txt',
'test-requirements.txt',
'dev-requirements.txt'
]
# List of variable names that could be used within the yaml files that
# represent lists of python packages.
BUILT_IN_PIP_PACKAGE_VARS = [
'service_pip_dependencies',
'pip_common_packages',
'pip_container_packages',
'pip_packages'
]
class DependencyFileProcessor(object):
def __init__(self, local_path):
"""
:type local_path: ``str``
:return:
"""
self.pip = dict()
self.pip['git_package'] = list()
self.pip['py_package'] = list()
self.git_pip_install = 'git+%s@%s'
self.file_names = self._get_files(path=local_path)
# Process everything simply by calling the method
self._process_files(ext=('yaml', 'yml'))
def _filter_files(self, file_names, ext):
"""Filter the files and return a sorted list.
:type file_names:
:type ext: ``str`` or ``tuple``
:returns: ``list``
"""
_file_names = list()
for file_name in file_names:
if file_name.endswith(ext):
if '/defaults/' in file_name or '/vars/' in file_name:
_file_names.append(file_name)
else:
continue
elif os.path.basename(file_name) in REQUIREMENTS_FILE_TYPES:
with open(file_name, 'rb') as f:
packages = [
i.split()[0] for i in f.read().splitlines()
if i
if not i.startswith('#')
]
self.pip['py_package'].extend(packages)
else:
return sorted(_file_names, reverse=True)
@staticmethod
def _get_files(path):
"""Return a list of all files in the vars/repo_packages directory.
:type path: ``str``
:returns: ``list``
"""
paths = os.walk(os.path.abspath(path))
files = list()
for fpath, _, afiles in paths:
for afile in afiles:
files.append(os.path.join(fpath, afile))
else:
return files
def _check_plugins(self, git_repo_plugins, git_data):
"""Check if the git url is a plugin type.
:type git_repo_plugins: ``dict``
:type git_data: ``dict``
"""
for repo_plugin in git_repo_plugins:
plugin = '%s/%s' % (
repo_plugin['path'].strip('/'),
repo_plugin['package'].lstrip('/')
)
package = self.git_pip_install % (
git_data['repo'],
'%s#egg=%s&subdirectory=%s' % (
git_data['branch'],
repo_plugin['package'].strip('/'),
plugin
)
)
self.pip['git_package'].append(package)
def _process_git(self, loaded_yaml, git_item):
"""Process git repos.
:type loaded_yaml: ``dict``
:type git_item: ``str``
"""
git_data = dict()
if git_item.split('_')[0] == 'git':
var_name = 'git'
else:
var_name = git_item.split('_')[0]
git_data['repo'] = loaded_yaml.get(git_item)
git_data['branch'] = loaded_yaml.get(
'%s_git_install_branch' % var_name.replace('.', '_')
)
if not git_data['branch']:
git_data['branch'] = loaded_yaml.get(
'git_install_branch',
'master'
)
package = self.git_pip_install % (
git_data['repo'], git_data['branch']
)
self.pip['git_package'].append(package)
git_repo_plugins = loaded_yaml.get('%s_repo_plugins' % var_name)
if git_repo_plugins:
self._check_plugins(
git_repo_plugins=git_repo_plugins,
git_data=git_data
)
def _process_files(self, ext):
"""Process files.
:type ext: ``tuple``
:type lower_priority: ``bol``
"""
file_names = self._filter_files(
file_names=self.file_names,
ext=ext
)
for file_name in file_names:
with open(file_name, 'rb') as f:
loaded_config = yaml.safe_load(f.read())
for key, values in loaded_config.items():
if key.endswith('git_repo'):
self._process_git(
loaded_yaml=loaded_config,
git_item=key
)
if [i for i in BUILT_IN_PIP_PACKAGE_VARS if i in key]:
self.pip['py_package'].extend(values)
def _arguments():
"""Return CLI arguments."""
arguments_dict = {
'optional_args': {
'local_path': {
'commands': [
'--local-path'
],
'help': 'Local path to cloned code.',
'metavar': '[PATH]',
'required': True
},
'report_file': {
'commands': [
'--report-file'
],
'help': 'Full path to write the package report to',
'metavar': '[FILE_PATH]',
'required': True
},
'storage_pool': {
'commands': [
'--storage-pool'
],
'help': 'Full path to the directory where you want to store'
' built wheels.',
'metavar': '[PATH]',
'required': True
},
'release_directory': {
'commands': [
'--release-directory'
],
'help': 'Full path to the directory where the releaesed links'
' will be stored.',
'metavar': '[PATH]',
'required': True
},
'add_on_repos': {
'commands': [
'--add-on-repos'
],
'help': 'Full repo path to require as an additional add on'
' repo. Example:'
' "git+https://github.com/rcbops/other-repo@master"',
'metavar': '[REPO_NAME]',
'nargs': '+'
},
'link_pool': {
'commands': [
'--link-pool'
],
'help': 'Full path to the directory links are stored.',
'metavar': '[PATH]',
'required': True
}
}
}
return arguments.ArgumentParserator(
arguments_dict=arguments_dict,
epilog='Licensed Apache2',
title='Discover all of the requirements within the'
' os-ansible-deployment project.',
detail='Requirement lookup',
description='Discover all of the requirements within the'
' os-ansible-deployment project.',
env_name='OS_ANSIBLE'
).arg_parser()
def _abs_path(path):
return os.path.abspath(
os.path.expanduser(
path
)
)
def _run_command(command):
print('Running "%s"' % command[2])
run_command = shell.ShellCommands(debug=True)
info, success = run_command.run_command(' '.join(command))
if not success:
raise SystemExit(info)
else:
print(info)
def main():
"""Run the main application."""
user_vars = _arguments()
return_list = list()
try:
dfp = DependencyFileProcessor(
local_path=_abs_path(user_vars['local_path'])
)
return_list.extend(dfp.pip['py_package'])
return_list.extend(dfp.pip['git_package'])
except Exception as exp:
raise SystemExit(
'Execution failure. Path: "%s", Error: "%s", Trace:\n%s' % (
user_vars['local_path'],
str(exp),
traceback.format_exc()
)
)
else:
return_data = {
'packages': list(),
'remote_packages': list()
}
for file_name in sorted(set(return_list)):
is_url = file_name.startswith(('http:', 'https:', 'git+'))
if is_url:
if '@' not in file_name:
return_data['packages'].append(file_name)
else:
return_data['remote_packages'].append(file_name)
else:
return_data['packages'].append(file_name)
else:
return_data['packages'] = ' '.join(
['"%s"' % i for i in set(return_data['packages'])]
)
if user_vars['add_on_repos']:
return_data['remote_packages'].extend(
[i.strip() for i in user_vars['add_on_repos']]
)
return_data['remote_packages'] = ' '.join(
['"%s"' % i for i in set(return_data['remote_packages'])]
)
# Build report
report_command = [
'yaprt',
'--debug',
'create-report',
'--report-file',
_abs_path(user_vars['report_file']),
'--git-install-repos',
return_data['remote_packages'],
'--packages',
return_data['packages']
]
_run_command(report_command)
# Build requirements wheels
requirements_command = [
'yaprt',
'--debug',
'build-wheels',
'--report-file',
_abs_path(user_vars['report_file']),
'--storage-pool',
_abs_path(user_vars['storage_pool']),
'--link-dir',
_abs_path(user_vars['release_directory']),
'--pip-extra-link-dirs',
_abs_path(user_vars['link_pool']),
'--pip-index',
'http://rpc-repo.rackspace.com/pools',
'--pip-extra-index',
'https://pypi.python.org/simple',
'--pip-bulk-operation',
'--build-output',
'/tmp/openstack-wheel-output',
'--build-dir',
'/tmp/openstack-builder',
'--build-requirements',
'--force-clean'
]
_run_command(requirements_command)
# Build wheels from git-repos
requirements_command = [
'yaprt',
'--debug',
'build-wheels',
'--report-file',
_abs_path(user_vars['report_file']),
'--storage-pool',
_abs_path(user_vars['storage_pool']),
'--link-dir',
_abs_path(user_vars['release_directory']),
'--pip-extra-link-dirs',
_abs_path(user_vars['link_pool']),
'--pip-no-deps',
'--pip-no-index',
'--build-output',
'/tmp/openstack-wheel-output',
'--build-dir',
'/tmp/openstack-builder',
'--build-branches',
'--build-releases',
'--force-clean'
]
_run_command(requirements_command)
# Create HTML index for all files in the release directory
index_command = [
'yaprt',
'--debug',
'create-html-indexes',
'--repo-dir',
_abs_path(user_vars['release_directory'])
]
_run_command(index_command)
if __name__ == '__main__':
main()
| apache-2.0 |
gh0std4ncer/doit | tests/test_exceptions.py | 7 | 2565 | from doit import exceptions
class TestInvalidCommand(object):
def test_just_string(self):
exception = exceptions.InvalidCommand('whatever string')
assert 'whatever string' == str(exception)
def test_task_not_found(self):
exception = exceptions.InvalidCommand(not_found='my_task')
exception.cmd_used = 'build'
assert 'command `build` invalid parameter: "my_task".' in str(exception)
def test_param_not_found(self):
exception = exceptions.InvalidCommand(not_found='my_task')
exception.cmd_used = None
want = 'Invalid parameter: "my_task". Must be a command,'
assert want in str(exception)
assert 'Type "doit help" to see' in str(exception)
def test_custom_binary_name(self):
exception = exceptions.InvalidCommand(not_found='my_task')
exception.cmd_used = None
exception.bin_name = 'my_tool'
assert 'Type "my_tool help" to see ' in str(exception)
class TestCatchedException(object):
def test_name(self):
class XYZ(exceptions.CatchedException):
pass
my_excp = XYZ("hello")
assert 'XYZ' == my_excp.get_name()
assert 'XYZ' in str(my_excp)
assert 'XYZ' in repr(my_excp)
def test_msg_notraceback(self):
my_excp = exceptions.CatchedException('got you')
msg = my_excp.get_msg()
assert 'got you' in msg
def test_exception(self):
try:
raise IndexError('too big')
except Exception as e:
my_excp = exceptions.CatchedException('got this', e)
msg = my_excp.get_msg()
assert 'got this' in msg
assert 'too big' in msg
assert 'IndexError' in msg
def test_catched(self):
try:
raise IndexError('too big')
except Exception as e:
my_excp = exceptions.CatchedException('got this', e)
my_excp2 = exceptions.CatchedException('handle that', my_excp)
msg = my_excp2.get_msg()
assert 'handle that' in msg
assert 'got this' not in msg # could be there too...
assert 'too big' in msg
assert 'IndexError' in msg
class TestAllCatched(object):
def test(self):
assert issubclass(exceptions.TaskFailed, exceptions.CatchedException)
assert issubclass(exceptions.TaskError, exceptions.CatchedException)
assert issubclass(exceptions.SetupError, exceptions.CatchedException)
assert issubclass(exceptions.DependencyError,
exceptions.CatchedException)
| mit |
Lupino/containerops | component/python/test/mamba/bootstrap.py | 5 | 4405 | #!/usr/bin/env python3
import subprocess
import os
import sys
import glob
import json
import anymarkup
REPO_PATH = 'git-repo'
def git_clone(url):
r = subprocess.run(['git', 'clone', url, REPO_PATH])
if r.returncode == 0:
return True
else:
print("[COUT] Git clone error: Invalid argument to exit",
file=sys.stderr)
print("[COUT] CO_RESULT = false")
return False
def get_pip_cmd(version):
if version == 'py3k' or version == 'python3':
return 'pip3'
return 'pip'
def get_python_cmd(version):
if version == 'py3k' or version == 'python3':
return 'python3'
return 'python'
def init_env(version):
subprocess.run([get_pip_cmd(version), 'install', 'mamba'])
def validate_version(version):
valid_version = ['python', 'python2', 'python3', 'py3k']
if version not in valid_version:
print("[COUT] Check version failed: the valid version is {}".format(valid_version), file=sys.stderr)
return False
return True
def setup(path, version='py3k'):
file_name = os.path.basename(path)
dir_name = os.path.dirname(path)
r = subprocess.run('cd {}; {} {} install'.format(dir_name, get_python_cmd(version), file_name),
shell=True)
if r.returncode != 0:
print("[COUT] install dependences failed", file=sys.stderr)
return False
return True
def pip_install(file_name, version='py3k'):
r = subprocess.run([get_pip_cmd(version), 'install', '-r', file_name])
if r.returncode != 0:
print("[COUT] install dependences failed", file=sys.stderr)
return False
return True
def mamba(file_name):
r = subprocess.run('cd {}; mamba {} --enable-coverage'.format(REPO_PATH, file_name), shell=True)
if r.returncode != 0:
print("[COUT] mamba error", file=sys.stderr)
return False
return True
def echo_json(use_yaml):
file_name = '{}/.coverage'.format(REPO_PATH)
data = open(file_name).read()
idx = data.find('{')
data = data[idx:]
data = json.loads(data)
if use_yaml:
data = anymarkup.serialize(data, 'yaml')
print('[COUT] CO_YAML_CONTENT {}'.format(str(data)[1:]))
else:
print('[COUT] CO_JSON_CONTENT {}'.format(json.dumps(data)))
def parse_argument():
data = os.environ.get('CO_DATA', None)
if not data:
return {}
validate = ['git-url', 'entry-file', 'version', 'out-put-type']
ret = {}
for s in data.split(' '):
s = s.strip()
if not s:
continue
arg = s.split('=')
if len(arg) < 2:
print('[COUT] Unknown Parameter: [{}]'.format(s))
continue
if arg[0] not in validate:
print('[COUT] Unknown Parameter: [{}]'.format(s))
continue
ret[arg[0]] = arg[1]
return ret
def main():
argv = parse_argument()
git_url = argv.get('git-url')
if not git_url:
print("[COUT] The git-url value is null", file=sys.stderr)
print("[COUT] CO_RESULT = false")
return
version = argv.get('version', 'py3k')
if not validate_version(version):
print("[COUT] CO_RESULT = false")
return
init_env(version)
entry_file = argv.get('entry-file')
if not entry_file:
print("[COUT] The entry-file value is null", file=sys.stderr)
print("[COUT] CO_RESULT = false")
return
if not git_clone(git_url):
return
for file_name in glob.glob('{}/setup.py'.format(REPO_PATH)):
setup(file_name, version)
for file_name in glob.glob('{}/*/setup.py'.format(REPO_PATH)):
setup(file_name, version)
for file_name in glob.glob('{}/requirements.txt'.format(REPO_PATH)):
pip_install(file_name, version)
for file_name in glob.glob('{}/*/requirements.txt'.format(REPO_PATH)):
pip_install(file_name, version)
for file_name in glob.glob('{}/requirements_dev.txt'.format(REPO_PATH)):
pip_install(file_name, version)
for file_name in glob.glob('{}/*/requirements_dev.txt'.format(REPO_PATH)):
pip_install(file_name, version)
out = mamba(entry_file)
use_yaml = argv.get('out-put-type', 'json') == 'yaml'
echo_json(use_yaml)
if not out:
print("[COUT] CO_RESULT = false")
return
print("[COUT] CO_RESULT = true")
main()
| apache-2.0 |
dsquareindia/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that a all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented at on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
gatieme/AderXCoding | language/python/re/memFI.py | 1 | 2785 | #!/usr/bin/python
# encoding=utf-8
#!coding:utf-8
import re
import sys
import urllib2
import argparse
import commands
import os
import subprocess
if __name__ == "__main__" :
# 测试正则表达式
reload(sys)
sys.setdefaultencoding("utf-8")
if len(sys.argv) > 1: # 如果在程序运行时,传递了命令行参数
# 打印传递的命令行参数的信息
print "您输入的所有参数共 %d 个,信息为 sys.argv = %s" % (len(sys.argv), sys.argv)
for i, eachArg in enumerate(sys.argv):
print "[%d] = %s" % (i, eachArg)
else:
print "Useage : read.py -r shell..."
exit(0)
# 创建一个解析对象
# 然后向该对象中添加你要关注的命令行参数和选项
# 每一个add_argument方法对应一个你要关注的参数或选项
# 最后调用parse_args()方法进行解析
# 解析成功之后即可使用
parser = argparse.ArgumentParser( )
parser.add_argument("-r", "--run", dest = "shell_parser", help = "The file you want to read...")
args = parser.parse_args( )
shell = args.shell_parser
try:
# 首先执行shell脚本,并读取到返回值的信息
#(status, output) = commands.getstatusoutput("ls")
#print status, output
#output = os.popen('ls')
#print output.read()
#print subprocess.call(["ls","-al"])
#handle = subprocess.Popen("ls", shell=True, stdout=subprocess.PIPE)
#print handle.communicate()[0]
output = subprocess.check_output("ls")
#print all_text.decode("utf-8")
#print all_text
#匹配的信息如下
#[ 2016-1-26 23:25:52]Process 18497 exited with code 0
#[ 2016-1-26 23:26:5]Process 18556 termed with signal 11(SIGSEGV)
reStr = r'.*?(\d{4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}:\d{1,2})]Process (\d{1,5}) (exited with code \d|termed with signal \d{1,2}\((.*?)\))'
#item[0] -=> [ 2016-1-26 23:25:52]
#item[1] -=> pid
#item[2] -=> Process 18497 exited with code 0 | Process 18556 termed with signal 11(SIGSEGV)
#item[3] -=> "" | SIGSEGV
non_exception = 0
sigsegv_exception = 0
pattern = re.compile(reStr, re.S)
myItems = re.findall(pattern, output)
print len(myItems)
#print myItems
for item in myItems:
#print item
if item[3] == "" :
non_exception += 1
elif item[3] == "SIGSEGV":
sigsegv_exception += 1
print "无异常", non_exception
print "SIGSEGV", sigsegv_exception
if non_exception = 1 :
print "0"
else
print "1", item[3]
finally:
pass | gpl-2.0 |
gautam1858/tensorflow | tensorflow/python/keras/wrappers/scikit_learn.py | 4 | 12808 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for using the Scikit-Learn API with Keras models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import types
import numpy as np
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.utils.generic_utils import has_arg
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.util.tf_export import keras_export
class BaseWrapper(object):
"""Base class for the Keras scikit-learn wrapper.
Warning: This class should not be used directly.
Use descendant classes instead.
Arguments:
build_fn: callable function or class instance
**sk_params: model parameters & fitting parameters
The `build_fn` should construct, compile and return a Keras model, which
will then be used to fit/predict. One of the following
three values could be passed to `build_fn`:
1. A function
2. An instance of a class that implements the `__call__` method
3. None. This means you implement a class that inherits from either
`KerasClassifier` or `KerasRegressor`. The `__call__` method of the
present class will then be treated as the default `build_fn`.
`sk_params` takes both model parameters and fitting parameters. Legal model
parameters are the arguments of `build_fn`. Note that like all other
estimators in scikit-learn, `build_fn` should provide default values for
its arguments, so that you could create the estimator without passing any
values to `sk_params`.
`sk_params` could also accept parameters for calling `fit`, `predict`,
`predict_proba`, and `score` methods (e.g., `epochs`, `batch_size`).
fitting (predicting) parameters are selected in the following order:
1. Values passed to the dictionary arguments of
`fit`, `predict`, `predict_proba`, and `score` methods
2. Values passed to `sk_params`
3. The default values of the `keras.models.Sequential`
`fit`, `predict`, `predict_proba` and `score` methods
When using scikit-learn's `grid_search` API, legal tunable parameters are
those you could pass to `sk_params`, including fitting parameters.
In other words, you could use `grid_search` to search for the best
`batch_size` or `epochs` as well as the model parameters.
"""
def __init__(self, build_fn=None, **sk_params):
self.build_fn = build_fn
self.sk_params = sk_params
self.check_params(sk_params)
def check_params(self, params):
"""Checks for user typos in `params`.
Arguments:
params: dictionary; the parameters to be checked
Raises:
ValueError: if any member of `params` is not a valid argument.
"""
legal_params_fns = [
Sequential.fit, Sequential.predict, Sequential.predict_classes,
Sequential.evaluate
]
if self.build_fn is None:
legal_params_fns.append(self.__call__)
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
legal_params_fns.append(self.build_fn.__call__)
else:
legal_params_fns.append(self.build_fn)
for params_name in params:
for fn in legal_params_fns:
if has_arg(fn, params_name):
break
else:
if params_name != 'nb_epoch':
raise ValueError('{} is not a legal parameter'.format(params_name))
def get_params(self, **params): # pylint: disable=unused-argument
"""Gets parameters for this estimator.
Arguments:
**params: ignored (exists for API compatibility).
Returns:
Dictionary of parameter names mapped to their values.
"""
res = copy.deepcopy(self.sk_params)
res.update({'build_fn': self.build_fn})
return res
def set_params(self, **params):
"""Sets the parameters of this estimator.
Arguments:
**params: Dictionary of parameter names mapped to their values.
Returns:
self
"""
self.check_params(params)
self.sk_params.update(params)
return self
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Arguments:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
"""
if self.build_fn is None:
self.model = self.__call__(**self.filter_sk_params(self.__call__))
elif (not isinstance(self.build_fn, types.FunctionType) and
not isinstance(self.build_fn, types.MethodType)):
self.model = self.build_fn(
**self.filter_sk_params(self.build_fn.__call__))
else:
self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
loss_name = self.model.loss
if hasattr(loss_name, '__name__'):
loss_name = loss_name.__name__
if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
fit_args.update(kwargs)
history = self.model.fit(x, y, **fit_args)
return history
def filter_sk_params(self, fn, override=None):
"""Filters `sk_params` and returns those in `fn`'s arguments.
Arguments:
fn : arbitrary function
override: dictionary, values to override `sk_params`
Returns:
res : dictionary containing variables
in both `sk_params` and `fn`'s arguments.
"""
override = override or {}
res = {}
for name, value in self.sk_params.items():
if has_arg(fn, name):
res.update({name: value})
res.update(override)
return res
@keras_export('keras.wrappers.scikit_learn.KerasClassifier')
class KerasClassifier(BaseWrapper):
"""Implementation of the scikit-learn classifier API for Keras.
"""
def fit(self, x, y, **kwargs):
"""Constructs a new model with `build_fn` & fit the model to `(x, y)`.
Arguments:
x : array-like, shape `(n_samples, n_features)`
Training samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.fit`
Returns:
history : object
details about the training history at each epoch.
Raises:
ValueError: In case of invalid shape for `y` argument.
"""
y = np.array(y)
if len(y.shape) == 2 and y.shape[1] > 1:
self.classes_ = np.arange(y.shape[1])
elif (len(y.shape) == 2 and y.shape[1] == 1) or len(y.shape) == 1:
self.classes_ = np.unique(y)
y = np.searchsorted(self.classes_, y)
else:
raise ValueError('Invalid shape for y: ' + str(y.shape))
self.n_classes_ = len(self.classes_)
return super(KerasClassifier, self).fit(x, y, **kwargs)
def predict(self, x, **kwargs):
"""Returns the class predictions for the given test data.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_classes`.
Returns:
preds: array-like, shape `(n_samples,)`
Class predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict_classes, kwargs)
classes = self.model.predict_classes(x, **kwargs)
return self.classes_[classes]
def predict_proba(self, x, **kwargs):
"""Returns class probability estimates for the given test data.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments
of `Sequential.predict_classes`.
Returns:
proba: array-like, shape `(n_samples, n_outputs)`
Class probability estimates.
In the case of binary classification,
to match the scikit-learn API,
will return an array of shape `(n_samples, 2)`
(instead of `(n_sample, 1)` as in Keras).
"""
kwargs = self.filter_sk_params(Sequential.predict_proba, kwargs)
probs = self.model.predict_proba(x, **kwargs)
# check if binary classification
if probs.shape[1] == 1:
# first column is probability of class 0 and second is of class 1
probs = np.hstack([1 - probs, probs])
return probs
def score(self, x, y, **kwargs):
"""Returns the mean accuracy on the given test data and labels.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean accuracy of predictions on `x` wrt. `y`.
Raises:
ValueError: If the underlying model isn't configured to
compute accuracy. You should pass `metrics=["accuracy"]` to
the `.compile()` method of the model.
"""
y = np.searchsorted(self.classes_, y)
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss_name = self.model.loss
if hasattr(loss_name, '__name__'):
loss_name = loss_name.__name__
if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
y = to_categorical(y)
outputs = self.model.evaluate(x, y, **kwargs)
if not isinstance(outputs, list):
outputs = [outputs]
for name, output in zip(self.model.metrics_names, outputs):
if name == 'acc':
return output
raise ValueError('The model is not configured to compute accuracy. '
'You should pass `metrics=["accuracy"]` to '
'the `model.compile()` method.')
@keras_export('keras.wrappers.scikit_learn.KerasRegressor')
class KerasRegressor(BaseWrapper):
"""Implementation of the scikit-learn regressor API for Keras.
"""
def predict(self, x, **kwargs):
"""Returns predictions for the given test data.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.predict`.
Returns:
preds: array-like, shape `(n_samples,)`
Predictions.
"""
kwargs = self.filter_sk_params(Sequential.predict, kwargs)
return np.squeeze(self.model.predict(x, **kwargs))
def score(self, x, y, **kwargs):
"""Returns the mean loss on the given test data and labels.
Arguments:
x: array-like, shape `(n_samples, n_features)`
Test samples where `n_samples` is the number of samples
and `n_features` is the number of features.
y: array-like, shape `(n_samples,)`
True labels for `x`.
**kwargs: dictionary arguments
Legal arguments are the arguments of `Sequential.evaluate`.
Returns:
score: float
Mean accuracy of predictions on `x` wrt. `y`.
"""
kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)
loss = self.model.evaluate(x, y, **kwargs)
if isinstance(loss, list):
return -loss[0]
return -loss
| apache-2.0 |
ferrants/ansible-modules-core | utilities/logic/async_wrapper.py | 189 | 6183 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import subprocess
import sys
import datetime
import traceback
import signal
import time
import syslog
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# logger.info("cobblerd started")
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(022)
# do second fork
try:
pid = os.fork()
if pid > 0:
# print "Daemon PID %d" % pid
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
dev_null = file('/dev/null','rw')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
if len(sys.argv) < 3:
print json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
})
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# setup logging directory
logdir = os.path.expanduser("~/.ansible_async")
log_path = os.path.join(logdir, jid)
if not os.path.exists(logdir):
try:
os.makedirs(logdir)
except:
print json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % logdir
})
def _run_command(wrapped_cmd, jid, log_path):
logfile = open(log_path, "w")
logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid }))
logfile.close()
logfile = open(log_path, "w")
result = {}
outdata = ''
try:
cmd = shlex.split(wrapped_cmd)
script = subprocess.Popen(cmd, shell=False,
stdin=None, stdout=logfile, stderr=logfile)
script.communicate()
outdata = file(log_path).read()
result = json.loads(outdata)
except (OSError, IOError), e:
result = {
"failed": 1,
"cmd" : wrapped_cmd,
"msg": str(e),
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
except:
result = {
"failed" : 1,
"cmd" : wrapped_cmd,
"data" : outdata, # temporary debug only
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
logfile.close()
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
#import logging
#import logging.handlers
#logger = logging.getLogger("ansible_async")
#logger.setLevel(logging.WARNING)
#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") )
def debug(msg):
#logger.warning(msg)
pass
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
# we need to not return immmediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
time.sleep(1)
debug("Return async_wrapper task started.")
print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path })
sys.stdout.flush()
sys.exit(0)
else:
# The actual wrapper process
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
debug("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
debug("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(5)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
debug("%s still running (%s)"%(sub_pid, remaining))
time.sleep(5)
remaining = remaining - 5
if remaining <= 0:
debug("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
debug("Sent kill to group %s"%sub_pid)
time.sleep(1)
sys.exit(0)
debug("Done in kid B.")
os._exit(0)
else:
# the child process runs the actual module
debug("Start module (%s)"%os.getpid())
_run_command(cmd, jid, log_path)
debug("Module complete (%s)"%os.getpid())
sys.exit(0)
except Exception, err:
debug("error: %s"%(err))
raise err
| gpl-3.0 |
steedos/odoo | addons/account_budget/wizard/account_budget_analytic.py | 375 | 2045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_budget_analytic(osv.osv_memory):
_name = 'account.budget.analytic'
_description = 'Account Budget report for analytic account'
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'account.analytic.account',
'form': data
}
datas['form']['ids'] = datas['ids']
return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_analyticaccountbudget', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lowfatcomputing/amforth | core/devices/atmega323/device.py | 5 | 1689 | # Partname: ATmega323
# Built using part description XML file version 203
# generated automatically, do not edit
MCUREGS = {
'ADCH': '$25',
'ADCL': '$24',
'ADCSR': '$26',
'ADMUX': '$27',
'ACSR': '$28',
'SFIOR': '$50',
'MCUCR': '$55',
'MCUCSR': '$54',
'OSCCAL': '$51',
'SPH': '$5E',
'SPL': '$5D',
'SPMCR': '$57',
'SREG': '$5F',
'EEARH': '$3F',
'EEARL': '$3E',
'EECR': '$3C',
'EEDR': '$3D',
'GICR': '$5B',
'GIFR': '$5A',
'DDRA': '$3A',
'PINA': '$39',
'PORTA': '$3B',
'DDRB': '$37',
'PINB': '$36',
'PORTB': '$38',
'DDRC': '$34',
'PINC': '$33',
'PORTC': '$35',
'DDRD': '$31',
'PIND': '$30',
'PORTD': '$32',
'SPCR': '$2D',
'SPDR': '$2F',
'SPSR': '$2E',
'OCR0': '$5C',
'TCCR0': '$53',
'TCNT0': '$52',
'TIFR': '$58',
'TIMSK': '$59',
'ICR1H': '$47',
'ICR1L': '$46',
'OCR1AH': '$4B',
'OCR1AL': '$4A',
'OCR1BH': '$49',
'OCR1BL': '$48',
'TCCR1A': '$4F',
'TCCR1B': '$4E',
'TCNT1H': '$4D',
'TCNT1L': '$4C',
'ASSR': '$42',
'OCR2': '$43',
'TCCR2': '$45',
'TCNT2': '$44',
'TWAR': '$22',
'TWBR': '$20',
'TWCR': '$56',
'TWDR': '$23',
'TWSR': '$21',
'UBRRH': '$40',
'UBRRL': '$29',
'UCSRA': '$2B',
'UCSRB': '$2A',
'UDR': '$2C',
'WDTCR': '$41',
'INT0Addr': '$002',
'INT1Addr': '$004',
'INT2Addr': '$006',
'TIMER2_COMPAddr': '$008',
'TIMER2_OVFAddr': '$00A',
'TIMER1_CAPTAddr': '$00C',
'TIMER1_COMPAAddr': '$00E',
'TIMER1_COMPBAddr': '$010',
'TIMER1_OVFAddr': '$012',
'TIMER0_COMPAddr': '$014',
'TIMER0_OVFAddr': '$016',
'SPI_STCAddr': '$018',
'USART_RXCAddr': '$01A',
'USART_UDREAddr': '$01C',
'USART_TXCAddr': '$01E',
'ADCAddr': '$020',
'EE_RDYAddr': '$022',
'ANA_COMPAddr': '$024',
'TWIAddr': '$026',
'SPM_RDYAddr': '$28'
} | gpl-2.0 |
maftieu/CouchPotatoServer | libs/sqlalchemy/dialects/mssql/adodbapi.py | 18 | 2284 | # mssql/adodbapi.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
The adodbapi dialect is not implemented for 0.6 at this time.
"""
import datetime
from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
import sys
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
return datetime.datetime(value.year, value.month, value.day)
return value
return process
class MSDialect_adodbapi(MSDialect):
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_unicode = sys.maxunicode == 65535
supports_unicode_statements = True
driver = 'adodbapi'
@classmethod
def import_dbapi(cls):
import adodbapi as module
return module
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.DateTime:MSDateTime_adodbapi
}
)
def create_connect_args(self, url):
keys = url.query
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
connectors.append ("Data Source=%s, %s" %
(keys.get("host"), keys.get("port")))
else:
connectors.append ("Data Source=%s" % keys.get("host"))
connectors.append ("Initial Catalog=%s" % keys.get("database"))
user = keys.get("user")
if user:
connectors.append("User Id=%s" % user)
connectors.append("Password=%s" % keys.get("password", ""))
else:
connectors.append("Integrated Security=SSPI")
return [[";".join (connectors)], {}]
def is_disconnect(self, e, connection, cursor):
return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
"'connection failure'" in str(e)
dialect = MSDialect_adodbapi
| gpl-3.0 |
cartertech/odoo-hr-ng | hr_infraction/wizard/batch.py | 1 | 3336 | #-*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 One Click Software (http://oneclick.solutions)
# and Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import openerp.netsvc
from openerp.osv import fields, orm, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DFORMAT
from openerp.tools.translate import _
class infraction_batch(orm.TransientModel):
_name ='hr.infraction.batch'
_description = 'Generate mass infraction incidents'
_columns = {
'employee_ids': fields.many2many('hr.employee', 'hr_employee_infraction_batch_rel', 'infraction_id', 'employee_id', 'Employees'),
'category_id': fields.many2one('hr.infraction.category', 'Infraction Category', required=True),
'name': fields.char('Subject', size=256, required=True,),
'date': fields.date('Date', required=True),
'memo': fields.text('Description'),
}
_defaults = {
'date': time.strftime(OE_DFORMAT),
}
def onchange_category(self, cr, uid, ids, category_id, context=None):
res = {'value': {'name': False}}
if category_id:
category = self.pool.get('hr.infraction.category').browse(cr, uid, category_id,
context=context)
res['value']['name'] = category.name
return res
def create_infractions(self, cr, uid, ids, context=None):
infra_obj = self.pool.get('hr.infraction')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
if not data['employee_ids']:
raise osv.except_osv(_("Warning !"), _("You must select at least one employee to generate wage adjustments."))
infra_ids = []
vals = {
'name': data['name'],
'category_id': data['category_id'][0],
'date': data['date'],
'memo': data['memo'],
'employee_id': False,
}
for ee_id in data['employee_ids']:
vals['employee_id'] = ee_id,
infra_ids.append(infra_obj.create(cr, uid, vals, context=context))
wkf_service = netsvc.LocalService('workflow')
for i_id in infra_ids:
wkf_service.trg_validate(uid, 'hr.infraction', i_id, 'signal_confirm', cr)
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
mtcode/autoscaler | hack/scripts/break_mig.py | 1 | 5500 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script breaks a given GCE MIG to simulate zone failure or similar disaster
scenario for testing purposes.
It works by polling `gcloud compute instances list` and adding iptables rules
on master to block ip addresses of instances, whose name matches pattern.
The script runs in endless until you kill it with signal (ctrl-c?) and than
it cleans up (remove iptables rules it added) before exiting.
Run with -e flag to break existing nodes in the node group and -u to break
new nodes added after the script was started. You're free to use both this
flags together to break all nodes.
Messing with iptables rules on master is obviously unsafe and can potentially
lead to completely breaking your cluster!
'''
import argparse
import atexit
import collections
import re
import subprocess
import sys
import time
InstanceInfo = collections.namedtuple("InstanceInfo", 'name ip')
def get_instances(master, ng):
'''Poll instances list and parse result to list of InstanceInfo structs'''
raw = subprocess.check_output(['gcloud', 'compute', 'instances', 'list'])
first = True
result = []
for l in raw.splitlines():
if first:
first = False
continue
parts = l.split()
name = parts[0]
if not name.startswith(ng):
continue
ips = []
for p in parts[1:]:
if re.match('([0-9]{1,3}\.){3}[0-9]{1,3}', p):
ips.append(p)
# XXX: A VM has showed up, but it doens't have internal and external ip
# yet, let's just pretend we haven't seen it yet
if len(ips) < 2:
continue
info = InstanceInfo(name, ips)
result.append(info)
return result
def break_node(master, instance, broken_ips, verbose):
'''Add iptable rules to drop packets coming from ips used by a give node'''
print 'Breaking node {}'.format(instance.name)
for ip in instance.ip:
if verbose:
print 'Blocking ip {} on master'.format(ip)
subprocess.call(['gcloud', 'compute', 'ssh', master, '--', 'sudo iptables -I INPUT 1 -p tcp -s {} -j DROP'.format(ip)])
broken_ips.add(ip)
def run(master, ng, existing, upcoming, broken_ips, verbose):
'''
Poll for new nodes and break them as required.
Runs an endless loop.
'''
instances = get_instances(master, ng)
known = set()
for inst in instances:
if existing:
break_node(master, inst, broken_ips, verbose)
known.add(inst.name)
while True:
instances = get_instances(master, ng)
for inst in instances:
if inst.name in known:
continue
if verbose:
print 'New instance observed: {}'.format(inst.name)
if upcoming:
break_node(master, inst, broken_ips, verbose)
known.add(inst.name)
time.sleep(5)
def clean_up(master, broken, verbose):
'''
Clean up iptable rules created by this script.
WARNING: this just deletes top N rules if you've added some rules to the
top of INPUT chain while this was running you will suffer.
'''
if verbose:
print 'Cleaning up top {} iptable rules'.format(len(broken))
for i in xrange(len(broken)):
subprocess.call(['gcloud', 'compute', 'ssh', master, '--', 'sudo iptables -D INPUT 1'])
def main():
parser = argparse.ArgumentParser(description='Break all existing and/or upcoming node in a MIG')
parser.add_argument('master_name', help='name of kubernetes master (will be used with gcloud)')
parser.add_argument('node_group_name', help='name of node group to break')
parser.add_argument('-e', '--existing', help='break existing nodes (they will become unavailable)', action='store_true')
parser.add_argument('-u', '--upcoming', help='break any new nodes added to this node group (they will not register at all)', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-y', '--yes', action='store_true')
args = parser.parse_args()
if not args.existing and not args.upcoming:
print 'At least one of --existing or --upcoming must be specified'
return
if not args.yes:
print 'Running this script will break nodes in your cluster for testing purposes.'
print 'The nodes may or may not recover after this. Your whole cluster may be broken.'
print 'DO NOT RUN THIS SCRIPT ON PRODUCTION CLUSTER.'
print 'Do you want to proceed? (anything but y stops the script)'
user_ok = sys.stdin.read(1)
if user_ok.upper() != 'Y':
return
broken = set()
atexit.register(clean_up, args.master_name, broken, args.verbose)
run(args.master_name, args.node_group_name, args.existing, args.upcoming, broken, args.verbose)
if __name__ == '__main__':
main()
| apache-2.0 |
wetneb/django | tests/timezones/tests.py | 10 | 54662 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=UTC), dt)
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=ICT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unspported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@skipIf(pytz is None, "this test requires pytz")
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(TestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
# but when it loads this representation, it substracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"- fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@skipIf(pytz is None, "this test requires pytz")
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@skipIf(pytz is None, "this test requires pytz")
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@skipIf(pytz is None, "this test requires pytz")
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
# Datetime inputs formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10), is_superuser=True, username='super',
first_name='Super', last_name='User', email='[email protected]',
is_staff=True, is_active=True, date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
| bsd-3-clause |
notthetup/openshift-iojs | bin/iojs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
| apache-2.0 |
mjtamlyn/django | django/contrib/messages/storage/base.py | 18 | 6090 | from django.conf import settings
from django.contrib.messages import constants, utils
from django.utils.encoding import force_text
LEVEL_TAGS = utils.get_level_tags()
class Message:
"""
Represent an actual message that can be stored in any of the supported
storage classes (typically session- or cookie-based) and rendered in a view
or template.
"""
def __init__(self, level, message, extra_tags=None):
self.level = int(level)
self.message = message
self.extra_tags = extra_tags
def _prepare(self):
"""
Prepare the message for serialization by forcing the ``message``
and ``extra_tags`` to str in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_text`` implementation for details).
"""
self.message = force_text(self.message, strings_only=True)
self.extra_tags = force_text(self.extra_tags, strings_only=True)
def __eq__(self, other):
return isinstance(other, Message) and self.level == other.level and \
self.message == other.message
def __str__(self):
return force_text(self.message)
@property
def tags(self):
extra_tags = force_text(self.extra_tags, strings_only=True)
if extra_tags and self.level_tag:
return ' '.join([extra_tags, self.level_tag])
elif extra_tags:
return extra_tags
elif self.level_tag:
return self.level_tag
return ''
@property
def level_tag(self):
return force_text(LEVEL_TAGS.get(self.level, ''), strings_only=True)
class BaseStorage:
"""
This is the base backend for temporary message storage.
This is not a complete class; to be a usable storage backend, it must be
subclassed and the two methods ``_get`` and ``_store`` overridden.
"""
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_messages = []
self.used = False
self.added_new = False
super().__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_messages) + len(self._queued_messages)
def __iter__(self):
self.used = True
if self._queued_messages:
self._loaded_messages.extend(self._queued_messages)
self._queued_messages = []
return iter(self._loaded_messages)
def __contains__(self, item):
return item in self._loaded_messages or item in self._queued_messages
@property
def _loaded_messages(self):
"""
Return a list of loaded messages, retrieving them first if they have
not been loaded yet.
"""
if not hasattr(self, '_loaded_data'):
messages, all_retrieved = self._get()
self._loaded_data = messages or []
return self._loaded_data
def _get(self, *args, **kwargs):
"""
Retrieve a list of stored messages. Return a tuple of the messages
and a flag indicating whether or not all the messages originally
intended to be stored in this storage were, in fact, stored and
retrieved; e.g., ``(messages, all_retrieved)``.
**This method must be implemented by a subclass.**
If it is possible to tell if the backend was not used (as opposed to
just containing no messages) then ``None`` should be returned in
place of ``messages``.
"""
raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')
def _store(self, messages, response, *args, **kwargs):
"""
Store a list of messages and return a list of any messages which could
not be stored.
One type of object must be able to be stored, ``Message``.
**This method must be implemented by a subclass.**
"""
raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')
def _prepare_messages(self, messages):
"""
Prepare a list of messages for storage.
"""
for message in messages:
message._prepare()
def update(self, response):
"""
Store all unread messages.
If the backend has yet to be iterated, store previously stored messages
again. Otherwise, only store messages added after the last iteration.
"""
self._prepare_messages(self._queued_messages)
if self.used:
return self._store(self._queued_messages, response)
elif self.added_new:
messages = self._loaded_messages + self._queued_messages
return self._store(messages, response)
def add(self, level, message, extra_tags=''):
"""
Queue a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
self._queued_messages.append(message)
def _get_level(self):
"""
Return the minimum recorded level.
The default level is the ``MESSAGE_LEVEL`` setting. If this is
not found, the ``INFO`` level is used.
"""
if not hasattr(self, '_level'):
self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
return self._level
def _set_level(self, value=None):
"""
Set a custom minimum recorded level.
If set to ``None``, the default level will be used (see the
``_get_level`` method).
"""
if value is None and hasattr(self, '_level'):
del self._level
else:
self._level = int(value)
level = property(_get_level, _set_level, _set_level)
| bsd-3-clause |
drinkssu/YourVoiceAlarmBackend | lib/werkzeug/contrib/securecookie.py | 318 | 12204 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
"""Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
"""Represents a secure cookie. You can subclass this class and provide
an alternative mac method. The import thing is that the mac method
is a function with a similar interface to the hashlib. Required
methods are update() and digest().
Example usage:
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
>>> x["foo"]
42
>>> x["baz"]
(1, 2, 3)
>>> x["blafasel"] = 23
>>> x.should_save
True
:param data: the initial data. Either a dict, list of tuples or `None`.
:param secret_key: the secret key. If not set `None` or not specified
it has to be set before :meth:`serialize` is called.
:param new: The initial value of the `new` flag.
"""
#: The hash method to use. This has to be a module with a new function
#: or a function that creates a hashlib object. Such as `hashlib.md5`
#: Subclasses can override this attribute. The default hash is sha1.
#: Make sure to wrap this in staticmethod() if you store an arbitrary
#: function there such as hashlib.sha1 which might be implemented
#: as a function.
hash_method = staticmethod(_default_hash)
#: the module used for serialization. Unless overriden by subclasses
#: the standard pickle module is used.
serialization_method = pickle
#: if the contents should be base64 quoted. This can be disabled if the
#: serialization process returns cookie safe strings only.
quote_base64 = True
def __init__(self, data=None, secret_key=None, new=True):
ModificationTrackingDict.__init__(self, data or ())
# explicitly convert it into a bytestring because python 2.6
# no longer performs an implicit string conversion on hmac
if secret_key is not None:
secret_key = bytes(secret_key)
self.secret_key = secret_key
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved. By default this is only true
for :attr:`modified` cookies, not :attr:`new`.
"""
return self.modified
@classmethod
def quote(cls, value):
"""Quote the value for the cookie. This can be any object supported
by :attr:`serialization_method`.
:param value: the value to quote.
"""
if cls.serialization_method is not None:
value = cls.serialization_method.dumps(value)
if cls.quote_base64:
value = b''.join(base64.b64encode(value).splitlines()).strip()
return value
@classmethod
def unquote(cls, value):
"""Unquote the value for the cookie. If unquoting does not work a
:exc:`UnquoteError` is raised.
:param value: the value to unquote.
"""
try:
if cls.quote_base64:
value = base64.b64decode(value)
if cls.serialization_method is not None:
value = cls.serialization_method.loads(value)
return value
except Exception:
# unfortunately pickle and other serialization modules can
# cause pretty every error here. if we get one we catch it
# and convert it into an UnquoteError
raise UnquoteError()
def serialize(self, expires=None):
"""Serialize the secure cookie into a string.
If expires is provided, the session will be automatically invalidated
after expiration when you unseralize it. This provides better
protection against session cookie theft.
:param expires: an optional expiration date for the cookie (a
:class:`datetime.datetime` object)
"""
if self.secret_key is None:
raise RuntimeError('no secret key defined')
if expires:
self['_expires'] = _date_to_unix(expires)
result = []
mac = hmac(self.secret_key, None, self.hash_method)
for key, value in sorted(self.items()):
result.append(('%s=%s' % (
url_quote_plus(key),
self.quote(value).decode('ascii')
)).encode('ascii'))
mac.update(b'|' + result[-1])
return b'?'.join([
base64.b64encode(mac.digest()).strip(),
b'&'.join(result)
])
@classmethod
def unserialize(cls, string, secret_key):
"""Load the secure cookie from a serialized string.
:param string: the cookie value to unserialize.
:param secret_key: the secret key used to serialize the cookie.
:return: a new :class:`SecureCookie`.
"""
if isinstance(string, text_type):
string = string.encode('utf-8', 'replace')
if isinstance(secret_key, text_type):
secret_key = secret_key.encode('utf-8', 'replace')
try:
base64_hash, data = string.split(b'?', 1)
except (ValueError, IndexError):
items = ()
else:
items = {}
mac = hmac(secret_key, None, cls.hash_method)
for item in data.split(b'&'):
mac.update(b'|' + item)
if not b'=' in item:
items = None
break
key, value = item.split(b'=', 1)
# try to make the key a string
key = url_unquote_plus(key.decode('ascii'))
try:
key = to_native(key)
except UnicodeError:
pass
items[key] = value
# no parsing error and the mac looks okay, we can now
# sercurely unpickle our cookie.
try:
client_hash = base64.b64decode(base64_hash)
except TypeError:
items = client_hash = None
if items is not None and safe_str_cmp(client_hash, mac.digest()):
try:
for key, value in iteritems(items):
items[key] = cls.unquote(value)
except UnquoteError:
items = ()
else:
if '_expires' in items:
if time() > items['_expires']:
items = ()
else:
del items['_expires']
else:
items = ()
return cls(items, secret_key, False)
@classmethod
def load_cookie(cls, request, key='session', secret_key=None):
"""Loads a :class:`SecureCookie` from a cookie in request. If the
cookie is not set, a new :class:`SecureCookie` instanced is
returned.
:param request: a request object that has a `cookies` attribute
which is a dict of all cookie values.
:param key: the name of the cookie.
:param secret_key: the secret key used to unquote the cookie.
Always provide the value even though it has
no default!
"""
data = request.cookies.get(key)
if not data:
return cls(secret_key=secret_key)
return cls.unserialize(data, secret_key)
def save_cookie(self, response, key='session', expires=None,
session_expires=None, max_age=None, path='/', domain=None,
secure=None, httponly=False, force=False):
"""Saves the SecureCookie in a cookie on response object. All
parameters that are not described here are forwarded directly
to :meth:`~BaseResponse.set_cookie`.
:param response: a response object that has a
:meth:`~BaseResponse.set_cookie` method.
:param key: the name of the cookie.
:param session_expires: the expiration date of the secure cookie
stored information. If this is not provided
the cookie `expires` date is used instead.
"""
if force or self.should_save:
data = self.serialize(session_expires or expires)
response.set_cookie(key, data, expires=expires, max_age=max_age,
path=path, domain=domain, secure=secure,
httponly=httponly)
| apache-2.0 |
googleads/google-ads-python | google/ads/googleads/v8/enums/types/price_placeholder_field.py | 1 | 2698 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"PricePlaceholderFieldEnum",},
)
class PricePlaceholderFieldEnum(proto.Message):
r"""Values for Price placeholder fields. """
class PricePlaceholderField(proto.Enum):
r"""Possible values for Price placeholder fields."""
UNSPECIFIED = 0
UNKNOWN = 1
TYPE = 2
PRICE_QUALIFIER = 3
TRACKING_TEMPLATE = 4
LANGUAGE = 5
FINAL_URL_SUFFIX = 6
ITEM_1_HEADER = 100
ITEM_1_DESCRIPTION = 101
ITEM_1_PRICE = 102
ITEM_1_UNIT = 103
ITEM_1_FINAL_URLS = 104
ITEM_1_FINAL_MOBILE_URLS = 105
ITEM_2_HEADER = 200
ITEM_2_DESCRIPTION = 201
ITEM_2_PRICE = 202
ITEM_2_UNIT = 203
ITEM_2_FINAL_URLS = 204
ITEM_2_FINAL_MOBILE_URLS = 205
ITEM_3_HEADER = 300
ITEM_3_DESCRIPTION = 301
ITEM_3_PRICE = 302
ITEM_3_UNIT = 303
ITEM_3_FINAL_URLS = 304
ITEM_3_FINAL_MOBILE_URLS = 305
ITEM_4_HEADER = 400
ITEM_4_DESCRIPTION = 401
ITEM_4_PRICE = 402
ITEM_4_UNIT = 403
ITEM_4_FINAL_URLS = 404
ITEM_4_FINAL_MOBILE_URLS = 405
ITEM_5_HEADER = 500
ITEM_5_DESCRIPTION = 501
ITEM_5_PRICE = 502
ITEM_5_UNIT = 503
ITEM_5_FINAL_URLS = 504
ITEM_5_FINAL_MOBILE_URLS = 505
ITEM_6_HEADER = 600
ITEM_6_DESCRIPTION = 601
ITEM_6_PRICE = 602
ITEM_6_UNIT = 603
ITEM_6_FINAL_URLS = 604
ITEM_6_FINAL_MOBILE_URLS = 605
ITEM_7_HEADER = 700
ITEM_7_DESCRIPTION = 701
ITEM_7_PRICE = 702
ITEM_7_UNIT = 703
ITEM_7_FINAL_URLS = 704
ITEM_7_FINAL_MOBILE_URLS = 705
ITEM_8_HEADER = 800
ITEM_8_DESCRIPTION = 801
ITEM_8_PRICE = 802
ITEM_8_UNIT = 803
ITEM_8_FINAL_URLS = 804
ITEM_8_FINAL_MOBILE_URLS = 805
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
koyuawsmbrtn/eclock | windows/Python27/Lib/idlelib/idle_test/test_config_name.py | 49 | 2431 | """Unit tests for idlelib.configSectionNameDialog"""
import unittest
from idlelib.idle_test.mock_tk import Var, Mbox
from idlelib import configSectionNameDialog as name_dialog_module
name_dialog = name_dialog_module.GetCfgSectionNameDialog
class Dummy_name_dialog(object):
# Mock for testing the following methods of name_dialog
name_ok = name_dialog.name_ok.im_func
Ok = name_dialog.Ok.im_func
Cancel = name_dialog.Cancel.im_func
# Attributes, constant or variable, needed for tests
used_names = ['used']
name = Var()
result = None
destroyed = False
def destroy(self):
self.destroyed = True
# name_ok calls Mbox.showerror if name is not ok
orig_mbox = name_dialog_module.tkMessageBox
showerror = Mbox.showerror
class ConfigNameTest(unittest.TestCase):
dialog = Dummy_name_dialog()
@classmethod
def setUpClass(cls):
name_dialog_module.tkMessageBox = Mbox
@classmethod
def tearDownClass(cls):
name_dialog_module.tkMessageBox = orig_mbox
def test_blank_name(self):
self.dialog.name.set(' ')
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('No', showerror.message)
def test_used_name(self):
self.dialog.name.set('used')
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('use', showerror.message)
def test_long_name(self):
self.dialog.name.set('good'*8)
self.assertEqual(self.dialog.name_ok(), '')
self.assertEqual(showerror.title, 'Name Error')
self.assertIn('too long', showerror.message)
def test_good_name(self):
self.dialog.name.set(' good ')
showerror.title = 'No Error' # should not be called
self.assertEqual(self.dialog.name_ok(), 'good')
self.assertEqual(showerror.title, 'No Error')
def test_ok(self):
self.dialog.destroyed = False
self.dialog.name.set('good')
self.dialog.Ok()
self.assertEqual(self.dialog.result, 'good')
self.assertTrue(self.dialog.destroyed)
def test_cancel(self):
self.dialog.destroyed = False
self.dialog.Cancel()
self.assertEqual(self.dialog.result, '')
self.assertTrue(self.dialog.destroyed)
if __name__ == '__main__':
unittest.main(verbosity=2, exit=False)
| gpl-2.0 |
lambder/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/masm.py | 61 | 2998 | """SCons.Tool.masm
Tool-specific initialization for the Microsoft Assembler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/masm.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Defaults
import SCons.Tool
import SCons.Util
ASSuffixes = ['.s', '.asm', '.ASM']
ASPPSuffixes = ['.spp', '.SPP', '.sx']
if SCons.Util.case_sensitive_suffixes('.s', '.S'):
ASPPSuffixes.extend(['.S'])
else:
ASSuffixes.extend(['.S'])
def generate(env):
"""Add Builders and construction variables for masm to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in ASSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASAction)
shared_obj.add_action(suffix, SCons.Defaults.ASAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
for suffix in ASPPSuffixes:
static_obj.add_action(suffix, SCons.Defaults.ASPPAction)
shared_obj.add_action(suffix, SCons.Defaults.ASPPAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
env['AS'] = 'ml'
env['ASFLAGS'] = SCons.Util.CLVar('/nologo')
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASCOM'] = '$AS $ASFLAGS /c /Fo$TARGET $SOURCES'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c /Fo$TARGET $SOURCES'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
return env.Detect('ml')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
roger-zhao/ardupilot-3.5-dev | Tools/ardupilotwaf/mavgen.py | 36 | 3330 | #!/usr/bin/env python
# encoding: utf-8
# (c) Siddharth Bharat Purohit, 3DRobotics Inc.
"""
The **mavgen.py** program is a code generator which creates mavlink header files.
"""
from waflib import Logs, Task, Utils, Node
from waflib.TaskGen import feature, before_method, extension
import os
import os.path
from xml.etree import ElementTree as et
class mavgen(Task.Task):
"""generate mavlink header files"""
color = 'BLUE'
before = 'cxx c'
def scan(self):
nodes = []
names = []
entry_point = self.inputs[0]
queue = [entry_point]
head = 0
while head < len(queue):
node = queue[head]
head += 1
tree = et.parse(node.abspath())
root = tree.getroot()
includes = root.findall('include')
for i in includes:
path = i.text.strip()
n = node.parent.find_node(path)
if n:
nodes.append(n)
if n not in queue:
queue.append(n)
continue
path = os.path.join(
node.parent.path_from(entry_point.parent),
path
)
if not path in names:
names.append(path)
return nodes, names
def run(self):
python = self.env.get_flat('PYTHON')
mavgen = self.env.get_flat('MAVGEN')
out = self.env.get_flat('OUTPUT_DIR')
src = self.env.get_flat('SRC')
ret = self.exec_command('{} {} --lang=C --wire-protocol=2.0 --output {} {}'.format(
python, mavgen, out, self.inputs[0].abspath()))
if ret != 0:
# ignore if there was a signal to the interpreter rather
# than a real error in the script. Some environments use a
# signed and some an unsigned return for this
if ret > 128 or ret < 0:
Logs.warn('mavgen crashed with code: {}'.format(ret))
ret = 0
else:
Logs.error('mavgen returned {} error code'.format(ret))
return ret
def post_run(self):
super(mavgen, self).post_run()
for header in self.generator.output_dir.ant_glob("*.h **/*.h", remove=False):
header.sig = header.cache_sig = self.cache_sig
def options(opt):
opt.load('python')
@feature('mavgen')
@before_method('process_source')
def process_mavgen(self):
if not hasattr(self, 'output_dir'):
self.bld.fatal('mavgen: missing option output_dir')
inputs = self.to_nodes(self.source)
outputs = []
self.source = []
if not isinstance(self.output_dir, Node.Node):
self.output_dir = self.bld.bldnode.find_or_declare(self.output_dir)
task = self.create_task('mavgen', inputs, outputs)
task.env['OUTPUT_DIR'] = self.output_dir.abspath()
task.env.env = dict(os.environ)
task.env.env['PYTHONPATH'] = task.env.MAVLINK_DIR
def configure(cfg):
"""
setup environment for mavlink header generator
"""
cfg.load('python')
cfg.check_python_version(minver=(2,7,0))
env = cfg.env
env.MAVLINK_DIR = cfg.srcnode.make_node('modules/mavlink/').abspath()
env.MAVGEN = env.MAVLINK_DIR + '/pymavlink/tools/mavgen.py'
| gpl-3.0 |
GreatFruitOmsk/snakefood | lib/python/snakefood/fallback/collections.py | 3 | 1609 | """
Safe fallback for defaultdict, in order to support 2.4.
"""
## From http://code.activestate.com/recipes/523034/
try:
from collections import defaultdict
except:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
| gpl-2.0 |
OaklandPeters/fastnode | fast/test_node.py | 1 | 1831 | """
Unit-tests and examples of syntax and behavior desired for AST-nodes.
"""
import unittest
import operator
import pdb
from .node import denode, FASTNode, NodeInterface
a, b, c = 5, 4, 2
inner = FASTNode(operator.__mul__, b, c)
outer = FASTNode(operator.__add__, a, inner)
# tree = FASTNode(
# operator.__add__,
# a,
# FASTNode(
# operator.__mul__,
# b,
# c
# )
# )
class NodeTests(unittest.TestCase):
def test_denode(self):
self.assertEqual(denode(inner), 8)
def test_nesting(self):
self.assertEqual(inner(), outer.positional[1]())
def test_basic(self):
# 5 + 4 * 2
standard_result = a + b * c
# (+ 5 (* 4 2))
operator_result = operator.__add__(
a,
operator.__mul__(
b,
c
)
)
node_result = outer.__call__() # evaluate via __call__
self.assertEqual(node_result, operator_result)
self.assertEqual(node_result, standard_result)
def test_call(self):
# Compare implicit VS explicit __call__
self.assertEqual(
inner(),
inner.__call__()
)
self.assertEqual(
outer(),
outer.__call__()
)
self.assertEqual(denode(inner), inner())
self.assertEqual(denode(outer), outer())
def test_nonnested_denode(self):
self.assertEqual(
outer.positional[1]._denode(),
8
)
def test_denode_function(self):
self.assertEqual(
denode(outer.positional[1]),
8
)
def test_repr(self):
self.assertIsInstance(repr(outer), str)
self.assertEqual(repr(inner), "FASTNode::__mul__(4, 2)")
if __name__ == "__main__":
unittest.main()
| mit |
pcmanus/cassandra-dtest | loadmaker_test.py | 2 | 1189 | import time
import loadmaker
from dtest import Tester
class TestLoadmaker(Tester):
def loadmaker_test(self):
cluster = self.cluster
cluster.populate(1).start()
node1 = cluster.nodelist()[0]
time.sleep(.2)
host, port = node1.network_interfaces['thrift']
lm = loadmaker.LoadMaker(host, port, column_family_name='cf_standard',
consistency_level='ONE')
lm.generate(500)
lm.validate()
lm.update(100)
lm.validate()
lm.delete(10)
lm.validate()
lm = loadmaker.LoadMaker(host, port, column_family_name='cf_counter',
is_counter=True,
consistency_level='ONE')
lm.generate(200)
lm.validate()
lm1 = loadmaker.LoadMaker(host, port, column_family_name='cf_standard2',
consistency_level='ONE')
lm2 = loadmaker.LoadMaker(host, port, column_family_name='cf_counter2',
is_counter=True,
consistency_level='ONE')
cont_loader = loadmaker.ContinuousLoader([lm1, lm2])
time.sleep(10)
cont_loader.read_and_validate()
cont_loader.exit()
| apache-2.0 |
srowen/spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
# Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
'PYARROW_IGNORE_TIMEZONE': '1',
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
while os.path.isdir(metastore_dir):
metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
os.mkdir(metastore_dir)
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
# Here expects skipped test output from unittest when verbosity level is
# 2 (or --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
assert SKIPPED_TESTS is not None
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "pypy3"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
def _check_coverage(python_exec):
# Make sure if coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.excluded_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
'pyspark.pandas.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
SKIPPED_TESTS = Manager().dict()
main()
| apache-2.0 |
ndaniel/fusioncatcher | bin/generate_1000genomes.py | 1 | 9395 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
It generates the list of candidate fusion genes. This list is hard coded
in here and it is manually curated from:
Greger et al. Tandem RNA Chimeras Contribute to Transcriptome Diversity in
Human Population and Are Associated with Intronic Genetic Variants,
Plos One, Aug 2014, http://dx.doi.org/10.1371/journal.pone.0104567
Author: Daniel Nicorici, [email protected]
Copyright (c) 2009-2021 Daniel Nicorici
This file is part of FusionCatcher.
FusionCatcher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FusionCatcher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FusionCatcher (see file 'COPYING.txt'). If not, see
<http://www.gnu.org/licenses/>.
By default, FusionCatcher is running BLAT aligner
<http://users.soe.ucsc.edu/~kent/src/> but it offers also the option to disable
all its scripts which make use of BLAT aligner if you choose explicitly to do so.
BLAT's license does not allow to be used for commercial activities. If BLAT
license does not allow to be used in your case then you may still use
FusionCatcher by forcing not use the BLAT aligner by specifying the option
'--skip-blat'. Fore more information regarding BLAT please see its license.
Please, note that FusionCatcher does not require BLAT in order to find
candidate fusion genes!
This file is not running/executing/using BLAT.
"""
import sys
import os
import optparse
import symbols
if __name__ == '__main__':
#command line parsing
usage = "%prog [options]"
description = """It generates the list of pre-candidate fusion genes from 1000 genomes project."""
version = "%prog 0.12 beta"
parser = optparse.OptionParser(usage=usage,description=description,version=version)
parser.add_option("--organism",
action = "store",
type = "string",
dest = "organism",
default = "homo_sapiens",
help="""The name of the organism for which the list of allowed candidate fusion genes is generated, e.g. homo_sapiens, mus_musculus, etc. Default is '%default'.""")
parser.add_option("--output",
action="store",
type="string",
dest="output_directory",
default = '.',
help="""The output directory where the list of allowed candidate fusion genes is generated. Default is '%default'.""")
parser.add_option("--skip-filter-overlap",
action="store_true",
dest="skip_filter_overlap",
default = False,
help="""If set then it filters out the known fusion genes where the (i) genes are fully overlapping, or (ii) the genes are partially overlapping and are on the same strand. Default is '%default'.""")
(options,args) = parser.parse_args()
# validate options
if not (options.output_directory
):
parser.print_help()
sys.exit(1)
#
#
#
print "Generating the list of 1000 genomes fusion genes..."
fusions = dict()
# manual curation from papers
fusions['rattus_norvegicus'] = []
fusions['mus_musculus'] = []
fusions['canis_familiaris'] = []
fusions['homo_sapiens'] = [
['ACTB','POTEE'],
['ACTB','POTEM'],
['AIG1','PARL'],
['AP5S1','MAVS'],
['ARHGAP19','SLIT1'],
['ARL4A','MTHFD1L'],
['BPTF','LRRC37A3'],
['C11ORF48','INTS5'],
['C1ORF189','TOX'],
['C2ORF27A','NBEA'],
['C6ORF72','PPIL4'],
['C7ORF55','LUC7L2'],
['CCL22','CX3CL1'],
['CENPE','BDH2'],
['CHURC1','FNTB'],
['CLN6','CALML4'],
['CNPY2','CS'],
['COPE','CERS1'],
['COPE','LASS1'],
['CORO7','PAM16'],
['COX5A','EDC3'],
['CTBS','GNG5'],
['CTSC','RAB38'],
['DDX5','POLG2'],
['EDARADD','ENO1'],
['EEF1A1','XPOT'],
['ELAVL1','TIMM44'],
['ENTPD1','CC2D2B'],
['FAM18B2','CDRT4'],
['FARSB','TRIM61'],
['FKBP1A','SDCBP2'],
['GNG5','CTBS'],
['GPI','PDCD2L'],
['HACL1','COLQ'],
['HAUS4','PRMT5'],
['HILPDA','EFCAB3'],
['HMSD','SERPINB8'],
['HSPE1','MOBKL3'],
['IFNAR2','IL10RB'],
['IFRD1','C7ORF53'],
['ISY1','RAB43'],
['JAK3','INSL3'],
['KIAA0101','CSNK1G1'],
['KIAA0494','ATPAF1'],
['LMAN2','MXD3'],
['LRRC33','PIGX'],
['LSP1','TNNT3'],
['MAPKAPK5','ACAD10'],
['MED8','ELOVL1'],
['METTL10','FAM53B'],
['METTL21B','TSFM'],
['NAIP','OCLN'],
['NDUFA13','YJEFN3'],
['NDUFB8','SEC31B'],
['NHP2L1','LLPH'],
['NRXN1','EIF2AK2'],
['NSUN4','FAAH'],
['PEX26','TUBA8'],
['PFKFB4','SHISA5'],
['PKHD1L1','EBAG9'],
['PLEKHO2','ANKDD1A'],
['POLA2','CDC42EP2'],
['POLR1A','REEP1'],
['PPIP5K1','CATSPER2'],
['PPRC1','NOLC1'],
['PRH1','PRR4'],
['PRIM1','NACA'],
['PRKAA1','TTC33'],
['PRKCB','YBX1'],
['PRR11','C17ORF71'],
['PRR13','PCBP2'],
['PXMP2','PGAM5'],
['RBM14','RBM4'],
['RHOQ','LRR1'],
['RNASET2','RPS6KA2'],
['RRM2','C2ORF48'],
['S1PR2','DNMT1'],
['SAV1','GYPE'],
['SDHAF2','C11ORF66'],
['SDHD','TEX12'],
['SLC35A3','HIAT1'],
['SLC39A1','CRTC2'],
['SLC43A3','PRG2'],
['SMC4','BCL6'],
['SMG1','ARL6IP1'],
['SNTB2','VPS4A'],
['SP100','HMGB1'],
['SUMO2','HN1'],
['SYNJ2BP','COX16'],
['TAGLN2','CCDC19'],
['TAP2','HLA-DOB'],
['TFG','GPR128'],
['TMBIM4','LLPH'],
['TNFAIP8L2','SCNM1'],
['TOMM5','FBXO10'],
['TOPORS','DDX58'],
['TPD52L2','DNAJC5'],
['TRIP12','SLC16A14'],
['TSC22D4','C7ORF61'],
['TSTD1','F11R'],
['TYK2','CDC37'],
['UBA2','WTIP'],
['UBE2J1','GABRR2'],
['UBE2J2','FAM132A'],
['UCHL3','LMO7'],
['UQCRQ','LEAP2'],
['VBP1','BRCC3'],
['VKORC1','PRSS53'],
['YARS2','NAP1L1'],
['ZNF175','CTU1'],
['ZNF343','SNRPB'],
['ZNF562','RBAK']
]
data = fusions.get(options.organism.lower(),[])
if data:
#file_symbols = os.path.join(options.output_directory,'genes_symbols.txt')
file_symbols = os.path.join(options.output_directory,'synonyms.txt')
loci = symbols.generate_loci(file_symbols)
genes = symbols.read_genes_symbols(file_symbols)
d = []
for (g1,g2) in data:
if g1.upper() != g2.upper():
ens1 = symbols.ensembl(g1.upper(),genes,loci)
ens2 = symbols.ensembl(g2.upper(),genes,loci)
if ens1 and ens2:
for e1 in ens1:
for e2 in ens2:
if e1 != e2:
d.append([e1,e2])
data = ['\t'.join(sorted(line)) + '\n' for line in d]
data = list(set(data))
print "%d known fusion genes found in manually currated database" % (len(data),)
if not options.skip_filter_overlap:
d1 = []
overlappings = ['ensembl_fully_overlapping_genes.txt',
'ensembl_same_strand_overlapping_genes.txt',
# 'refseq_fully_overlapping_genes.txt',
# 'refseq_same_strand_overlapping_genes.txt',
# 'ucsc_fully_overlapping_genes.txt',
# 'ucsc_same_strand_overlapping_genes.txt',
# 'pairs_pseudogenes.txt',
# 'paralogs.txt'
]
for ov in overlappings:
p = os.path.join(options.output_directory,ov)
print "Parsing file:",p
if os.path.isfile(p):
d2 = sorted(set([tuple(sorted(line.rstrip('\r\n').split('\t'))) for line in file(p,'r').readlines() if line.rstrip('\r\n')]))
d1.extend(d2)
d = set()
for line in d1:
(a,b) = (line[0],line[1])
if a > b:
(a,b) = (b,a)
d.add("%s\t%s\n" % (a,b))
skipped = [line for line in data if line in d]
data = [line for line in data if not line in d]
file(os.path.join(options.output_directory,'1000genomes_known_but_overlapping.txt'),'w').writelines(sorted(skipped))
print "%d known fusion genes left after removing the overlappings" % (len(data),)
file(os.path.join(options.output_directory,'1000genomes.txt'),'w').writelines(sorted(data))
#
| gpl-3.0 |
pacoqueen/odfpy | contrib/html2odt/shtml2odt.py | 7 | 22876 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#
import string, sys, re, getopt
import urllib2, htmlentitydefs, urlparse
from urllib import quote_plus
from HTMLParser import HTMLParser
from cgi import escape,parse_header
from types import StringType
from odf.opendocument import OpenDocumentText, load
from odf import dc, text, table
import htmlstyles
def converturl(url, document=None):
""" grab and convert url
"""
url = string.strip(url)
# if url.lower()[:5] != "http:":
# raise IOError, "Only http is accepted"
_proxies = {}
proxy_support = urllib2.ProxyHandler(_proxies)
opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header("User-agent", "HTML2ODT: Convert HTML to OpenDocument")
conn = urllib2.urlopen(req)
if not conn:
raise IOError, "Failure in open"
data = conn.read()
headers = conn.info()
conn.close()
encoding = 'iso8859-1' #Standard HTML
if headers.has_key('content-type'):
(ct, parms) = parse_header(headers['content-type'])
if parms.has_key('charset'):
encoding = parms['charset']
mhp = HTML2ODTParser(document, encoding, url)
mhp.feed(data)
return mhp
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
incomplete = re.compile('&[a-zA-Z#]')
ampersand = re.compile('&')
def listget(list, key, default=None):
for l in list:
if l[0] == key:
default = l[1]
return default
class TagObject:
def __init__(self, tag, attrs, output_loc):
self.tag = tag
self.attrs = attrs
self.output_loc = output_loc
class HTML2ODTParser(HTMLParser):
def __init__(self, document, encoding, baseurl):
HTMLParser.__init__(self)
self.doc = document
self.curr = self.doc.text
if self.doc.getStyleByName("Standard") is None:
style = Style(name="Standard", family="paragraph", attributes={'class':"text"})
self.doc.styles.addElement(style)
if self.doc.getStyleByName("Text_20_body") is None:
style = Style(name="Text_20_body", displayname="Text body", family="paragraph",
parentstylename="Standard", attributes={'class':"text"})
p = ParagraphProperties(margintop="0cm", marginbottom="0.212cm")
style.addElement(p)
self.doc.styles.addElement(style)
if self.doc.getStyleByName("Heading") is None:
style = Style(name="Heading", family="paragraph", parentstylename="Standard",
nextstylename="Text_20_body", attributes={'class':"text"})
p = ParagraphProperties(margintop="0.423cm", marginbottom="0.212cm", keepwithnext="always")
style.addElement(p)
p = TextProperties(fontname="Nimbus Sans L", fontsize="14pt",
fontnameasian="DejaVu LGC Sans", fontsizeasian="14pt",
fontnamecomplex="DejaVu LGC Sans", fontsizecomplex="14pt")
style.addElement(p)
self.doc.styles.addElement(style)
self.encoding = encoding
(scheme, host, path, params, fragment) = urlparse.urlsplit(baseurl)
lastslash = path.rfind('/')
if lastslash > -1:
path = path[:lastslash]
self.baseurl = urlparse.urlunsplit((scheme, host, path,'',''))
self.basehost = urlparse.urlunsplit((scheme, host, '','',''))
self.sectnum = 0
self.tagstack = []
self.pstack = []
self.processelem = True
self.processcont = True
self.__data = []
self.elements = {
'a': (self.s_html_a, self.close_tag),
'base': ( self.output_base, None),
'b': ( self.s_html_fontstyle, self.close_tag),
'big': ( self.s_html_fontstyle, self.close_tag),
'br': ( self.output_br, None),
'col': ( self.s_html_col, None),
'dd': ( self.s_html_dd, self.close_tag),
'dt': ( self.s_html_dt, None),
'div': ( self.s_html_section, self.e_html_section),
'em': ( self.s_html_emphasis, self.close_tag),
'h1': ( self.s_html_headline, self.close_tag),
'h2': ( self.s_html_headline, self.close_tag),
'h3': ( self.s_html_headline, self.close_tag),
'h4': ( self.s_html_headline, self.close_tag),
'h5': ( self.s_html_headline, self.close_tag),
'h6': ( self.s_html_headline, self.close_tag),
'head': ( self.s_ignorexml, None),
'i': ( self.s_html_fontstyle, self.close_tag),
'img': ( self.output_img, None),
'li': ( self.s_html_li, self.e_html_li),
'meta': ( self.meta_encoding, None),
'ol': ( self.output_ol, self.e_html_list),
'p': ( self.s_html_block, self.e_html_block),
's': ( self.s_html_fontstyle, self.close_tag),
'small':( self.s_html_fontstyle, self.close_tag),
'span': ( self.s_html_span, self.close_tag),
'strike':( self.s_html_fontstyle, self.close_tag),
'strong':( self.s_html_emphasis, self.close_tag),
'table':( self.s_html_table, self.e_html_table),
'td': ( self.s_html_td, self.close_tag),
'th': ( self.s_html_td, self.close_tag),
'title':( self.s_html_title, self.e_html_title),
'tr': ( self.s_html_tr, self.close_tag),
'tt': ( self.s_html_fontstyle, self.close_tag),
'u': ( self.s_html_fontstyle, self.close_tag),
'ul': ( self.output_ul, self.e_html_list),
'var': ( self.s_html_emphasis, self.close_tag),
}
def result(self):
""" Return a string
String must be in UNICODE
"""
str = string.join(self.__data,'')
self.__data = []
return str
def meta_name(self, attrs):
""" Look in meta tag for textual info"""
foundit = 0
# Is there a name attribute?
for attr in attrs:
if attr[0] == 'name' and string.lower(attr[1]) in ('description',
'keywords','title',
'dc.description','dc.keywords','dc.title'
):
foundit = 1
if foundit == 0:
return 0
# Is there a content attribute?
content = self.find_attr(attrs,'content')
if content:
self.handle_data(u' ')
self.handle_attr(content)
self.handle_data(u' ')
return 1
def meta_encoding(self, tag, attrs):
""" Look in meta tag for page encoding (Content-Type)"""
foundit = 0
# Is there a content-type attribute?
for attr in attrs:
if attr[0] == 'http-equiv' and string.lower(attr[1]) == 'content-type':
foundit = 1
if foundit == 0:
return 0
# Is there a content attribute?
for attr in attrs:
if attr[0] == 'content':
(ct, parms) = parse_header(attr[1])
if parms.has_key('charset'):
self.encoding = parms['charset']
return 1
def s_ignorexml(self, tag, attrs):
self.processelem = False
def output_base(self, tag, attrs):
""" Change the document base if there is a base tag """
baseurl = listget(attrs, 'href', self.baseurl)
(scheme, host, path, params, fragment) = urlparse.urlsplit(baseurl)
lastslash = path.rfind('/')
if lastslash > -1:
path = path[:lastslash]
self.baseurl = urlparse.urlunsplit((scheme, host, path,'',''))
self.basehost = urlparse.urlunsplit((scheme, host, '','',''))
def output_br(self, tag, attrs):
self.curr.addElement(text.LineBreak())
def s_html_font(self, tag, attrs):
""" 15.2.1 Font style elements: the TT, I, B, BIG, SMALL,
STRIKE, S, and U elements
"""
tagdict = {
}
def s_html_emphasis(self, tag, attrs):
""" 9.2.1 Phrase elements: EM, STRONG, DFN, CODE, SAMP, KBD,
VAR, CITE, ABBR, and ACRONYM
"""
tagdict = {
'cite': ['Citation', {'fontstyle':"italic", 'fontstyleasian':"italic", 'fontstylecomplex':"italic" }],
'code': ['Source_20_Text', {'fontname':"Courier", 'fontnameasian':"Courier",'fontnamecomplex':"Courier" }],
'dfn': ['Definition',{ }],
'em': ['Emphasis', {'fontstyle':"italic", 'fontstyleasian':"italic", 'fontstylecomplex':"italic" }],
'strong': ['Strong_20_Emphasis': {'fontweight':"bold",'fontweightasian':"bold",'fontweightcomplex':"bold"}],
'var': ['Variable', {'fontstyle':"italic", 'fontstyleasian':"italic", 'fontstylecomplex':"italic" }],
}
stylename = tagdict.get(tag,'Emphasis')
# Add the styles we need to the stylesheet
if stylename == "Source_20_Text" and self.doc.getStyleByName(stylename) is None:
style = Style(name="Source_20_Text", displayname="Source Text", family="text")
p = TextProperties(fontname="Courier", fontnameasian="Courier", fontnamecomplex="Courier")
style.addElement(p)
self.doc.styles.addElement(style)
e = text.Span(stylename=stylename)
self.curr.addElement(e)
self.curr = e
def s_html_fontstyle(self, tag, attrs):
""" 15.2.1 Font style elements: the TT, I, B, BIG, SMALL,
STRIKE, S, and U elements
('tt' is not considered an automatic style by OOo)
"""
tagdict = {
'b': ['BoldX',{'fontweight':"bold",
'fontweightasian':"bold",'fontweightcomplex':"bold" }],
'big': ['BigX', {'fontsize':"120%"}],
'i': ['ItalicX', {'fontstyle':"italic", 'fontstyleasian':"italic", 'fontstylecomplex':"italic" }],
'tt': ['TeletypeX', {'fontname':"Courier", 'fontnameasian':"Courier", 'fontnamecomplex':"Courier" }],
's': ['StrikeX', {'textlinethroughstyle':"solid"}],
'small': ['SmallX', {'fontsize':"80%"}],
'strike': ['StrikeX', {'textlinethroughstyle':"solid"}],
'u': ['UnderlineX', {'textunderlinestyle':"solid", 'textunderlinewidth':"auto",
'textunderlinecolor':"fontcolor"}],
}
stylename,styledecl = tagdict.get(tag,[None,None])
if stylename and self.doc.getStyleByName(stylename) is None:
style = Style(name=stylename, family="text")
style.addElement(TextProperties(attributes=styledecl))
self.doc.automaticstyles.addElement(style)
if stylename:
e = text.Span(stylename=stylename)
else:
e = text.Span()
self.curr.addElement(e)
self.curr = e
def s_html_span(self, tag, attrs):
e = text.Span()
self.curr.addElement(e)
self.curr = e
def s_html_title(self, tag, attrs):
e = dc.Title()
self.doc.meta.addElement(e)
self.curr = e
def e_html_title(self, tag):
self.curr = self.curr.parentNode
def output_img(self, tag, attrs):
src = listget(attrs, 'src', "Illegal IMG tag!")
alt = listget(attrs, 'alt', src)
# Must remember name of image and download it.
self.write_odt(u'<draw:image xlink:href="Pictures/%s" xlink:type="simple" xlink:show="embed" xlink:actuate="onLoad"/>' % '00000.png')
def s_html_a(self, tag, attrs):
href = None
href = listget(attrs, 'href', None)
if href:
if href in ("", "#"):
href == self.baseurl
elif href.find("://") >= 0:
pass
elif href[0] == '/':
href = self.basehost + href
e = text.A(type="simple", href=href)
else:
e = text.A()
# if self.curr.parentNode.qname != text.P().qname:
# p = text.P()
# self.curr.addElement(p)
# self.curr = p
self.curr.addElement(e)
self.curr = e
def close_tag(self, tag):
self.curr = self.curr.parentNode
def s_html_dd(self, tag, attrs):
if self.doc.getStyleByName("List_20_Contents") is None:
style = Style(name="List_20_Contents", displayname="List Contents", family="paragraph",
parentstylename="Standard", attributes={'class':"html"})
p = ParagraphProperties(marginleft="1cm", marginright="0cm", textindent="0cm", autotextindent="false")
style.addElement(p)
self.doc.styles.addElement(style)
e = text.P(stylename="List_20_Contents")
self.curr.addElement(e)
self.curr = e
def s_html_dt(self, tag, attrs):
if self.doc.getStyleByName("List_20_Heading") is None:
style = Style(name="List_20_Heading", displayname="List Heading", family="paragraph", parentstylename="Standard",
nextstylename="List_20_Contents", attributes={'class':"html"})
p = ParagraphProperties(marginleft="0cm", marginright="0cm", textindent="0cm", autotextindent="false")
style.addElement(p)
self.doc.styles.addElement(style)
e = text.P(stylename="List_20_Heading")
self.curr.addElement(e)
self.curr = e
def output_ul(self, tag, attrs):
self.write_odt(u'<text:list text:style-name="List_20_1">')
def output_ol(self, tag, attrs):
self.write_odt(u'<text:list text:style-name="Numbering_20_1">')
def e_html_list(self, tag):
self.write_odt(u'</text:list>')
def s_html_li(self, tag, attrs):
self.write_odt(u'<text:list-item><text:p text:style-name="P1">')
def e_html_li(self, tag):
self.write_odt(u'</text:p></text:list-item>')
def s_html_headline(self, tag, attrs):
stylename = "Heading_20_%s" % tag[1]
if stylename == "Heading_20_1" and self.doc.getStyleByName("Heading_20_1") is None:
style = Style(name="Heading_20_1", displayname="Heading 1",
family="paragraph", parentstylename="Heading", nextstylename="Text_20_body",
attributes={'class':"text"}, defaultoutlinelevel=1)
p = TextProperties(fontsize="115%", fontweight="bold", fontsizeasian="115%",
fontweightasian="bold", fontsizecomplex="115%", fontweightcomplex="bold")
style.addElement(p)
self.doc.styles.addElement(style)
if stylename == "Heading_20_2" and self.doc.getStyleByName("Heading_20_2") is None:
style = Style(name="Heading_20_2", displayname="Heading 2",
family="paragraph", parentstylename="Heading", nextstylename="Text_20_body",
attributes={'class':"text"}, defaultoutlinelevel=2)
p = TextProperties(fontsize="14pt", fontstyle="italic", fontweight="bold",
fontsizeasian="14pt", fontstyleasian="italic", fontweightasian="bold",
fontsizecomplex="14pt", fontstylecomplex="italic", fontweightcomplex="bold")
style.addElement(p)
self.doc.styles.addElement(style)
if stylename == "Heading_20_3" and self.doc.getStyleByName("Heading_20_3") is None:
style = Style(name="Heading_20_3", displayname="Heading 3",
family="paragraph", parentstylename="Heading", nextstylename="Text_20_body",
attributes={'class':"text"}, defaultoutlinelevel=3)
p = TextProperties(fontsize="14pt", fontweight="bold", fontsizeasian="14pt",
fontweightasian="bold", fontsizecomplex="14pt", fontweightcomplex="bold")
style.addElement(p)
self.doc.styles.addElement(style)
e = text.H(stylename="Heading_20_%s" % tag[1], outlinelevel=tag[1])
self.curr.addElement(e)
self.curr = e
def s_html_table(self, tag, attrs):
e = table.Table()
self.curr.addElement(e)
self.curr = e
def e_html_table(self, tag):
self.curr = self.curr.parentNode
def s_html_td(self, tag, attrs):
e = table.TableCell()
self.curr.addElement(e)
self.curr = e
def s_html_tr(self, tag, attrs):
e = table.TableRow()
self.curr.addElement(e)
self.curr = e
def s_html_col(self, tag, attrs):
e = table.TableColumn()
self.curr.addElement(e)
def s_html_section(self, tag, attrs):
""" Outputs block tag such as <p> and <div> """
name = self.find_attr(attrs,'id')
if name is None:
self.sectnum = self.sectnum + 1
name = "Sect%d" % self.sectnum
e = text.Section(name=name)
self.curr.addElement(e)
self.curr = e
def e_html_section(self, tag):
""" Outputs block tag such as <p> and <div> """
self.curr = self.curr.parentNode
def s_html_block(self, tag, attrs):
""" Outputs block tag such as <p> and <div> """
e = text.P(stylename="Text_20_body")
self.curr.addElement(e)
self.curr = e
def e_html_block(self, tag):
""" Outputs block tag such as <p> and <div> """
self.curr = self.curr.parentNode
#
# HANDLE STARTTAG
#
def handle_starttag(self, tag, attrs):
self.pstack.append( (self.processelem, self.processcont) )
tagobj = TagObject(tag, attrs, self.last_data_pos())
self.tagstack.append(tagobj)
method = self.elements.get(tag, (None, None))[0]
if self.processelem and method:
method(tag, attrs)
#
# HANDLE END
#
def handle_endtag(self, tag):
"""
"""
tagobj = self.tagstack.pop()
method = self.elements.get(tag, (None, None))[1]
if self.processelem and method:
method(tag)
self.processelem, self.processcont = self.pstack.pop()
#
# Data operations
#
def handle_data(self, data):
if data.strip() == '': return
if self.processelem and self.processcont:
self.curr.addText(data)
def write_odt(self, data):
""" Collect the data to show on the webpage """
if type(data) == StringType:
data = unicode(data, self.encoding)
self.__data.append(data)
def last_data_pos(self):
return len(self.__data)
def find_attr(self, attrs, key):
""" Run through the attibutes to find a specific one
return None if not found
"""
for attr in attrs:
if attr[0] == key:
return attr[1]
return None
#
# Tagstack operations
#
def find_tag(self, tag):
""" Run down the stack to find the last entry with the same tag name
Not Tested
"""
for tagitem in range(len(self.tagstack), 0, -1):
if tagitem.tag == tag:
return tagitem
return None
def handle_charref(self, name):
""" Handle character reference for UNICODE
"""
if name[0] in ('x', 'X'):
try:
n = int(name[1:],16)
except ValueError:
return
else:
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 65535:
return
self.handle_data(unichr(n))
def handle_entityref(self, name):
"""Handle entity references.
"""
table = htmlentitydefs.name2codepoint
if name in table:
self.handle_data(unichr(table[name]))
else:
return
def handle_attr(self, attrval):
""" Scan attribute values for entities and resolve them
Simply calls handle_data
"""
i = 0
n = len(attrval)
while i < n:
match = ampersand.search(attrval, i) #
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(attrval[i:j])
i = j
if i == n: break
startswith = attrval.startswith
if startswith('&#', i):
match = charref.match(attrval, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = k
continue
else:
break
elif startswith('&', i):
match = entityref.match(attrval, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = k
continue
match = incomplete.match(attrval, i)
if match:
# match.group() will contain at least 2 chars
if match.group() == attrval[i:]:
self.error("EOF in middle of entity or char ref")
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = i + 1
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if i < n:
self.handle_data(attrval[i:n])
i = n
def usage():
sys.stderr.write("Usage: %s [-a] inputurl outputfile\n" % sys.argv[0])
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "a", ["append"])
except getopt.GetoptError:
usage()
sys.exit(2)
appendto = False
for o, a in opts:
if o in ("-a", "--append"):
appendto = True
if appendto:
doc = load(args[1])
else:
doc = OpenDocumentText()
result = converturl(args[0], doc)
print result.doc.xml()
result.doc.save(args[1])
| gpl-2.0 |
michaelgira23/JEREMY | Skype4Py/filetransfer.py | 21 | 3643 | """File transfers.
"""
__docformat__ = 'restructuredtext en'
import os
from utils import *
class FileTransfer(Cached):
"""Represents a file transfer.
"""
_ValidateHandle = int
def __repr__(self):
return Cached.__repr__(self, 'Id')
def _Alter(self, AlterName, Args=None):
return self._Owner._Alter('FILETRANSFER', self.Id, AlterName, Args)
def _Property(self, PropName, Set=None):
return self._Owner._Property('FILETRANSFER', self.Id, PropName, Set)
def _GetBytesPerSecond(self):
return int(self._Property('BYTESPERSECOND'))
BytesPerSecond = property(_GetBytesPerSecond,
doc="""Transfer speed in bytes per second.
:type: int
""")
def _GetBytesTransferred(self):
return long(self._Property('BYTESTRANSFERRED'))
BytesTransferred = property(_GetBytesTransferred,
doc="""Number of bytes transferred.
:type: long
""")
def _GetFailureReason(self):
return str(self._Property('FAILUREREASON'))
FailureReason = property(_GetFailureReason,
doc="""Transfer failure reason.
:type: `enums`.fileTransferFailureReason*
""")
def _GetFileName(self):
return os.path.basename(self.FilePath)
FileName = property(_GetFileName,
doc="""Name of the transferred file.
:type: str
""")
def _GetFilePath(self):
return unicode2path(self._Property('FILEPATH'))
FilePath = property(_GetFilePath,
doc="""Full path to the transferred file.
:type: str
""")
def _GetFileSize(self):
return long(self._Property('FILESIZE'))
FileSize = property(_GetFileSize,
doc="""Size of the transferred file in bytes.
:type: long
""")
def _GetFinishDatetime(self):
from datetime import datetime
return datetime.fromtimestamp(self.FinishTime)
FinishDatetime = property(_GetFinishDatetime,
doc="""File transfer end date and time.
:type: datetime.datetime
""")
def _GetFinishTime(self):
return float(self._Property('FINISHTIME'))
FinishTime = property(_GetFinishTime,
doc="""File transfer end timestamp.
:type: float
""")
def _GetId(self):
return self._Handle
Id = property(_GetId,
doc="""Unique file transfer Id.
:type: int
""")
def _GetPartnerDisplayName(self):
return self._Property('PARTNER_DISPNAME')
PartnerDisplayName = property(_GetPartnerDisplayName,
doc="""File transfer partner DisplayName.
:type: unicode
""")
def _GetPartnerHandle(self):
return str(self._Property('PARTNER_HANDLE'))
PartnerHandle = property(_GetPartnerHandle,
doc="""File transfer partner Skypename.
:type: str
""")
def _GetStartDatetime(self):
from datetime import datetime
return datetime.fromtimestamp(self.StartTime)
StartDatetime = property(_GetStartDatetime,
doc="""File transfer start date and time.
:type: datetime.datetime
""")
def _GetStartTime(self):
return float(self._Property('STARTTIME'))
StartTime = property(_GetStartTime,
doc="""File transfer start timestamp.
:type: float
""")
def _GetStatus(self):
return str(self._Property('STATUS'))
Status = property(_GetStatus,
doc="""File transfer status.
:type: `enums`.fileTransferStatus*
""")
def _GetType(self):
return str(self._Property('TYPE'))
Type = property(_GetType,
doc="""File transfer type.
:type: `enums`.fileTransferType*
""")
class FileTransferCollection(CachedCollection):
_CachedType = FileTransfer
| bsd-3-clause |
hexcap/dpkt | dpkt/tns.py | 6 | 1079 | # $Id: tns.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Transparent Network Substrate."""
import dpkt
class TNS(dpkt.Packet):
__hdr__ = (
('length', 'H', 0),
('pktsum', 'H', 0),
('type', 'B', 0),
('rsvd', 'B', 0),
('hdrsum', 'H', 0),
('msg', '0s', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.length - self.__hdr_len__
if n > len(self.data):
raise dpkt.NeedData('short message (missing %d bytes)' %
(n - len(self.data)))
self.msg = self.data[:n]
self.data = self.data[n:]
def test_tns():
s = ('\x00\x23\x00\x00\x01\x00\x00\x00\x01\x34\x01\x2c\x00\x00\x08\x00\x7f'
'\xff\x4f\x98\x00\x00\x00\x01\x00\x01\x00\x22\x00\x00\x00\x00\x01\x01X')
t = TNS(s)
assert t.msg.startswith('\x01\x34')
# test a truncated packet
try:
t = TNS(s[:-10])
except dpkt.NeedData:
pass
if __name__ == '__main__':
test_tns()
print 'Tests Successful...'
| bsd-3-clause |
tedsunnyday/SE-Server | server/lib/requests/packages/charade/hebrewprober.py | 206 | 13642 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
| apache-2.0 |
Pankaj-Sakariya/android-source-browsing.platform--external--chromium-trace | trace-viewer/third_party/closure_linter/closure_linter/ecmalintrules.py | 123 | 34907 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)',
'[email protected] (Jacob Richman)')
import re
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokens
from closure_linter import javascripttokenizer
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import htmlutil
from closure_linter.common import lintrunner
from closure_linter.common import position
from closure_linter.common import tokens
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
# Static constants.
MAX_LINE_LENGTH = 80
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile('(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile('\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
"""
# Start from the last token so that we have the flag object attached to
# and DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.NORMAL):
# Dots are acceptable places to wrap.
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except:
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > self.MAX_LINE_LENGTH:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignoreable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max = 1
if '@param' in parts:
max = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags)) > max):
self._HandleError(errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
"""
flag = token.attached_object
type = flag.type
if type and type is not None and not type.isspace():
pieces = self.TYPE_SPLIT.split(type)
if len(pieces) == 1 and type.count('|') == 1 and (
type.endswith('|null') or type.startswith('null|')):
self._HandleError(errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % type, token)
for p in pieces:
if p.count('|') and p.count('?'):
# TODO(robbyw): We should do actual parsing of JsDoc types. As is,
# this won't report an error for {number|Array.<string>?}, etc.
self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space or
the previous token doesn't end with a space and the previous token is on the
same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
Position.AtBeginning())
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT)):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token)
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, Position(0, space_count))
elif (type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif type == Type.END_BLOCK:
# This check is for object literal end block tokens, but there is no need
# to test that condition since a comma at the end of any other kind of
# block is undoubtedly a parse error.
last_code = token.metadata.last_code
if last_code.IsOperator(','):
self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
'Illegal comma at end of object literal', last_code,
Position.All(last_code.string))
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
if state.InTopLevelFunction():
# When the function was top-level and not immediately called, check
# that it's terminated by a semi-colon.
if state.InAssignedFunction():
if not is_immediately_called and (last_in_line or
not token.next.type == Type.SEMICOLON):
self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, Position.AtEnd(token.string))
else:
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, Position.All(token.next.string))
if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
self._HandleError(errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, Position.All(token.next.string))
elif type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
# Allow a single double semi colon in for loops for cases like:
# for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, Position.All(token.string))
elif type == Type.START_PAREN:
if token.previous and token.previous.type == Type.KEYWORD:
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
if (before_space and before_space.line_number == token.line_number and
before_space.type == Type.IDENTIFIER):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, Position.All(token.previous.string))
elif type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous, Position.All(token.previous.string))
if token.type == Type.END_BRACKET:
last_code = token.metadata.last_code
if last_code.IsOperator(','):
self._HandleError(errors.COMMA_AT_END_OF_LITERAL,
'Illegal comma at end of array literal', last_code,
Position.All(last_code.string))
elif type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
Position(1, len(token.string) - 1))
elif type == Type.OPERATOR:
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):
self._HandleError(errors.EXTRA_SPACE,
'Extra space before "%s"' % token.string, token.previous,
Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
Position.AtBeginning())
# Check that binary operators are not used to start lines.
if ((not last_code or last_code.line_number != token.line_number) and
not token.metadata.IsUnaryOperator()):
self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator should go on previous line "%s"' % token.string,
token)
elif type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.type.split('|'):
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type,
token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author [email protected] (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space after email address',
token.next,
Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. Can't be too few, if
# not at least one we wouldn't match @author tag.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
if not flag.description or flag.description is None:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
# We want punctuation to be inside of any tags ending a description,
# so strip tags before checking description. See bug 1127192. Note
# that depending on how lines break, the real description end token
# may consist only of stripped html and the effective end token can
# be different.
end_token = flag.description_end_token
end_string = htmlutil.StripTags(end_token.string).strip()
while (end_string == '' and not
end_token.type in Type.FLAG_ENDING_TYPES):
end_token = end_token.previous
if end_token.type in Type.FLAG_DESCRIPTION_TYPES:
end_string = htmlutil.StripTags(end_token.string).rstrip()
if not (end_string.endswith('.') or end_string.endswith('?') or
end_string.endswith('!')):
# Find the position for the missing punctuation, inside of any html
# tags.
desc_str = end_token.string.rstrip()
while desc_str.endswith('>'):
start_tag_index = desc_str.rfind('<')
if start_tag_index < 0:
break
desc_str = desc_str[:start_tag_index].rstrip()
end_position = Position(len(desc_str), 0)
self._HandleError(
errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,
('%s descriptions must end with valid punctuation such as a '
'period.' % token.string),
end_token, end_position)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.type and flag.type != '' and not flag.type.isspace():
self._CheckJsDocType(token)
if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
not state.InParentheses() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token);
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
not ('underscore' in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
# Check for illegaly assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
if index != -1 and identifier.find('.', index + 11) == -1:
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages
# but some languages care about parameters that don't have
# doc comments and some languages don't care.
# Languages that don't allow variables to by typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
not token.previous.type in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, Position.AtBeginning())
def Finalize(self, state, tokenizer_mode):
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
# Check that the mode is not mid comment, argument list, etc.
if not tokenizer_mode == Modes.TEXT_MODE:
self._HandleError(
errors.FILE_IN_BLOCK,
'File ended in mode "%s".' % tokenizer_mode,
last_non_space_token)
try:
self._indentation.Finalize()
except Exception, e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit."""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
| bsd-3-clause |
HwisooSo/gemV-update | src/mem/slicc/ast/StatementListAST.py | 91 | 2240 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.AST import AST
class StatementListAST(AST):
def __init__(self, slicc, statements):
super(StatementListAST, self).__init__(slicc)
if not isinstance(statements, (list, tuple)):
statements = [ statements ]
self.statements = statements
def __repr__(self):
return "[StatementListAST: %r]" % self.statements
def generate(self, code, return_type):
for statement in self.statements:
statement.generate(code, return_type)
def findResources(self, resources):
for statement in self.statements:
statement.findResources(resources)
| bsd-3-clause |
icodemachine/Stem | stem/util/lru_cache.py | 7 | 7373 | # Drop in replace for python 3.2's collections.lru_cache, from...
# http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
#
# ... which is under the MIT license. Stem users should *not* rely upon this
# module. It will be removed when we drop support for python 3.2 and below.
"""
Memoization decorator that caches a function's return value. If later called
with the same arguments then the cached value is returned rather than
reevaluated.
This is a a python 2.x port of `functools.lru_cache
<http://docs.python.org/3/library/functools.html#functools.lru_cache>`_. If
using python 3.2 or later you should use that instead.
"""
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = set([int, str, frozenset, type(None)]),
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
| lgpl-3.0 |
alfasin/st2 | st2client/st2client/utils/httpclient.py | 3 | 4996 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
from pipes import quote as pquote
import requests
LOG = logging.getLogger(__name__)
def add_ssl_verify_to_kwargs(func):
def decorate(*args, **kwargs):
if isinstance(args[0], HTTPClient) and 'https' in getattr(args[0], 'root', ''):
cacert = getattr(args[0], 'cacert', None)
kwargs['verify'] = cacert if cacert else False
return func(*args, **kwargs)
return decorate
def add_auth_token_to_headers(func):
def decorate(*args, **kwargs):
token = kwargs.pop('token', None)
if token:
headers = kwargs.get('headers', dict())
headers['X-Auth-Token'] = str(token)
kwargs['headers'] = headers
return func(*args, **kwargs)
return decorate
def add_json_content_type_to_headers(func):
def decorate(*args, **kwargs):
headers = kwargs.get('headers', dict())
content_type = headers.get('content-type', 'application/json')
headers['content-type'] = content_type
kwargs['headers'] = headers
return func(*args, **kwargs)
return decorate
class HTTPClient(object):
def __init__(self, root, cacert=None, debug=False):
self.root = self._get_url_without_trailing_slash(root)
self.cacert = cacert
self.debug = debug
@add_ssl_verify_to_kwargs
@add_auth_token_to_headers
def get(self, url, **kwargs):
response = requests.get(self.root + url, **kwargs)
response = self._response_hook(response=response)
return response
@add_ssl_verify_to_kwargs
@add_auth_token_to_headers
@add_json_content_type_to_headers
def post(self, url, data, **kwargs):
response = requests.post(self.root + url, json.dumps(data), **kwargs)
response = self._response_hook(response=response)
return response
@add_ssl_verify_to_kwargs
@add_auth_token_to_headers
@add_json_content_type_to_headers
def put(self, url, data, **kwargs):
response = requests.put(self.root + url, json.dumps(data), **kwargs)
response = self._response_hook(response=response)
return response
@add_ssl_verify_to_kwargs
@add_auth_token_to_headers
@add_json_content_type_to_headers
def patch(self, url, data, **kwargs):
response = requests.patch(self.root + url, data, **kwargs)
response = self._response_hook(response=response)
return response
@add_ssl_verify_to_kwargs
@add_auth_token_to_headers
def delete(self, url, **kwargs):
response = requests.delete(self.root + url, **kwargs)
response = self._response_hook(response=response)
return response
def _response_hook(self, response):
if self.debug:
# Log cURL request line
curl_line = self._get_curl_line_for_request(request=response.request)
print("# -------- begin %d request ----------" % id(self))
print(curl_line)
print("# -------- begin %d response ----------" % (id(self)))
print(response.text)
print("# -------- end %d response ------------" % (id(self)))
print('')
return response
def _get_curl_line_for_request(self, request):
parts = ['curl']
# method
method = request.method.upper()
if method in ['HEAD']:
parts.extend(['--head'])
else:
parts.extend(['-X', pquote(method)])
# headers
for key, value in request.headers.items():
parts.extend(['-H ', pquote('%s: %s' % (key, value))])
# body
if request.body:
parts.extend(['--data-binary', pquote(request.body)])
# URL
parts.extend([pquote(request.url)])
curl_line = ' '.join(parts)
return curl_line
def _get_url_without_trailing_slash(self, value):
"""
Function which strips a trailing slash from the provided url if one is present.
:param value: URL to format.
:type value: ``str``
:rtype: ``str``
"""
result = value[:-1] if value.endswith('/') else value
return result
| apache-2.0 |
roadmapper/ansible | lib/ansible/module_utils/network/exos/exos.py | 20 | 8830 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONNECTION = None
class Cli:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._connection = None
def get_capabilities(self):
"""Returns platform info of the remove device
"""
connection = self._get_connection()
return json.loads(connection.get_capabilities())
def _get_connection(self):
if not self._connection:
self._connection = Connection(self._module._socket_path)
return self._connection
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
if self._device_configs == {}:
connection = self._get_connection()
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
return self._device_configs
def run_commands(self, commands, check_rc=True):
"""Runs list of commands on remote device and returns results
"""
connection = self._get_connection()
try:
response = connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = self._get_connection()
try:
diff = conn.get_diff(candidate=candidate, running=running, diff_match=diff_match,
diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
class HttpApi:
def __init__(self, module):
self._module = module
self._device_configs = {}
self._connection_obj = None
def get_capabilities(self):
"""Returns platform info of the remove device
"""
try:
capabilities = self._connection.get_capabilities()
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return json.loads(capabilities)
@property
def _connection(self):
if not self._connection_obj:
self._connection_obj = Connection(self._module._socket_path)
return self._connection_obj
def get_config(self, flags=None):
"""Retrieves the current config from the device or cache
"""
flags = [] if flags is None else flags
if self._device_configs == {}:
try:
out = self._connection.get_config(flags=flags)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
self._device_configs = to_text(out, errors='surrogate_then_replace').strip()
return self._device_configs
def run_commands(self, commands, check_rc=True):
"""Runs list of commands on remote device and returns results
"""
try:
response = self._connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return response
def send_requests(self, requests):
"""Send a list of http requests to remote device and return results
"""
if requests is None:
raise ValueError("'requests' value is required")
responses = list()
for req in to_list(requests):
try:
response = self._connection.send_request(**req)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
responses.append(response)
return responses
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
try:
diff = self._connection.get_diff(candidate=candidate, running=running, diff_match=diff_match,
diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
except ConnectionError as exc:
self._module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return diff
def get_capabilities(module):
conn = get_connection(module)
return conn.get_capabilities()
def get_connection(module):
global _DEVICE_CONNECTION
if not _DEVICE_CONNECTION:
connection_proxy = Connection(module._socket_path)
cap = json.loads(connection_proxy.get_capabilities())
if cap['network_api'] == 'cliconf':
conn = Cli(module)
elif cap['network_api'] == 'exosapi':
conn = HttpApi(module)
else:
module.fail_json(msg='Invalid connection type %s' % cap['network_api'])
_DEVICE_CONNECTION = conn
return _DEVICE_CONNECTION
def get_config(module, flags=None):
flags = None if flags is None else flags
conn = get_connection(module)
return conn.get_config(flags)
def load_config(module, commands):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands))
def run_commands(module, commands, check_rc=True):
conn = get_connection(module)
return conn.run_commands(to_command(module, commands), check_rc=check_rc)
def to_command(module, commands):
transform = ComplexList(dict(
command=dict(key=True),
output=dict(default='text'),
prompt=dict(type='list'),
answer=dict(type='list'),
sendonly=dict(type='bool', default=False),
check_all=dict(type='bool', default=False),
), module)
return transform(to_list(commands))
def send_requests(module, requests):
conn = get_connection(module)
return conn.send_requests(to_request(module, requests))
def to_request(module, requests):
transform = ComplexList(dict(
path=dict(key=True),
method=dict(),
data=dict(type='dict'),
), module)
return transform(to_list(requests))
def get_diff(module, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
conn = get_connection(module)
return conn.get_diff(candidate=candidate, running=running, diff_match=diff_match, diff_ignore_lines=diff_ignore_lines, path=path, diff_replace=diff_replace)
| gpl-3.0 |
userzimmermann/pytest | testing/test_argcomplete.py | 179 | 3582 | from __future__ import with_statement
import py, pytest
# test for _argcomplete but not specific for any application
def equal_with_bash(prefix, ffc, fc, out=None):
res = ffc(prefix)
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
out.write('equal_with_bash %s %s\n' % (retval, res))
if not retval:
out.write(' python - bash: %s\n' % (set(res) - res_bash))
out.write(' bash - python: %s\n' % (res_bash - set(res)))
return retval
# copied from argcomplete.completers as import from there
# also pulls in argcomplete.__init__ which opens filedescriptor 9
# this gives an IOError at the end of testrun
def _wrapcall(*args, **kargs):
try:
if py.std.sys.version_info > (2,7):
return py.std.subprocess.check_output(*args,**kargs).decode().splitlines()
if 'stdout' in kargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = py.std.subprocess.Popen(
stdout=py.std.subprocess.PIPE, *args, **kargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kargs.get("args")
if cmd is None:
cmd = args[0]
raise py.std.subprocess.CalledProcessError(retcode, cmd)
return output.decode().splitlines()
except py.std.subprocess.CalledProcessError:
return []
class FilesCompleter(object):
'File completer class, optionally takes a list of allowed extensions'
def __init__(self,allowednames=(),directories=True):
# Fix if someone passes in a string instead of a list
if type(allowednames) is str:
allowednames = [allowednames]
self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames]
self.directories = directories
def __call__(self, prefix, **kwargs):
completion = []
if self.allowednames:
if self.directories:
files = _wrapcall(['bash','-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion += [ f + '/' for f in files]
for x in self.allowednames:
completion += _wrapcall(['bash', '-c',
"compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)])
else:
completion += _wrapcall(['bash', '-c',
"compgen -A file -- '{p}'".format(p=prefix)])
anticomp = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion = list( set(completion) - set(anticomp))
if self.directories:
completion += [f + '/' for f in anticomp]
return completion
class TestArgComplete:
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_compare_with_compgen(self):
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/ /d /data qqq'.split():
assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_remove_dir_prefix(self):
"""this is not compatible with compgen but it is with bash itself:
ls /usr/<TAB>
"""
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/usr/'.split():
assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
| mit |
paulmathews/nova | nova/api/openstack/compute/contrib/aggregates.py | 4 | 7879 | # Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
if len(body) == 1 and "host" in body:
host = body['host']
else:
raise exc.HTTPBadRequest
return fn(self, req, id, host, *args, **kwargs)
return wrapped
class AggregateController(object):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.AggregateAPI()
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context)
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': aggregates}
def create(self, req, body):
"""Creates an aggregate, given its name and availability_zone."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
host_aggregate = body["aggregate"]
name = host_aggregate["name"]
avail_zone = host_aggregate["availability_zone"]
except KeyError:
raise exc.HTTPBadRequest
if len(host_aggregate) != 2:
raise exc.HTTPBadRequest
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
except exception.AggregateNameExists as e:
LOG.info(e)
raise exc.HTTPConflict
except exception.InvalidAggregateAction as e:
LOG.info(e)
raise
return self._marshall_aggregate(aggregate)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_("Cannot show aggregate: %(id)s") % locals())
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
updates = body["aggregate"]
except KeyError:
raise exc.HTTPBadRequest
if len(updates) < 1:
raise exc.HTTPBadRequest
for key in updates.keys():
if not key in ["name", "availability_zone"]:
raise exc.HTTPBadRequest
try:
aggregate = self.api.update_aggregate(context, id, updates)
except exception.AggregateNotFound:
LOG.info(_("Cannot update aggregate: %(id)s") % locals())
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context)
try:
self.api.delete_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_("Cannot delete aggregate: %(id)s") % locals())
raise exc.HTTPNotFound
def action(self, req, id, body):
_actions = {
'add_host': self._add_host,
'remove_host': self._remove_host,
'set_metadata': self._set_metadata,
}
for action, data in body.iteritems():
try:
return _actions[action](req, id, data)
except KeyError:
msg = _("Aggregates does not have %s action") % action
raise exc.HTTPBadRequest(explanation=msg)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
@get_host_from_body
def _add_host(self, req, id, host):
"""Adds a host to the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.add_host_to_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.ComputeHostNotFound):
LOG.info(_("Cannot add host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
except (exception.AggregateHostExists,
exception.InvalidAggregateAction):
LOG.info(_("Cannot add host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
@get_host_from_body
def _remove_host(self, req, id, host):
"""Removes a host from the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.AggregateHostNotFound):
LOG.info(_("Cannot remove host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
except exception.InvalidAggregateAction:
LOG.info(_("Cannot remove host %(host)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPConflict
return self._marshall_aggregate(aggregate)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest
try:
metadata = body["metadata"]
except KeyError:
raise exc.HTTPBadRequest
try:
aggregate = self.api.update_aggregate_metadata(context,
id, metadata)
except exception.AggregateNotFound:
LOG.info(_("Cannot set metadata %(metadata)s in aggregate "
"%(id)s") % locals())
raise exc.HTTPNotFound
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
return {"aggregate": aggregate}
class Aggregates(extensions.ExtensionDescriptor):
"""Admin-only aggregate administration"""
name = "Aggregates"
alias = "os-aggregates"
namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
updated = "2012-01-12T00:00:00+00:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-aggregates',
AggregateController(),
member_actions={"action": "POST", })
resources.append(res)
return resources
| apache-2.0 |
liaotup/AutoDeployer | Tomcat Server (Git Client)/commanderService/main.py | 1 | 1639 | #! /bin/bash
# -*- coding: utf-8 -*
import urlparse
import os
import comm_log
import tornado.ioloop
import tornado.web
import tornado.options
from tornado.options import define, options
# 监听端口
define("port", default=8978, help="run on the given port", type=int)
# 日志输出
define("log", default=comm_log.get_logging('gohook'))
token = '888v8d6d66d7f8f87d6df78f9f9f9d8f'
script = '/home/AutoDeployer/'
def pull(projectName, isNeedReploy):
print 'AutoDeployer: Pull Project:'+projectName
os.system('sh '+script+'gitPull.sh ' + projectName)
if isNeedReploy == "true":
os.system('sh '+script+'tomcatDeloyer.sh ' + projectName)
class MainHandler(tornado.web.RequestHandler):
def get(self):
result = urlparse.urlparse(self.request.uri)
params = urlparse.parse_qs(result.query, True)
print 'AutoDeployer: Token name:' + params.get('token')[0]
print 'AutoDeployer: Project name:' + params.get('projectName')[0]
print 'AutoDeployer: isNeedReploy:' + params.get('isNeedReploy')[0]
if params.get('token')[0] == token:
pull(params.get('projectName')[0], params.get('isNeedReploy')[0])
self.write('Service has sense of it:'+params.get('projectName')[0] + " isNeedToRedeploy:" + params.get('isNeedReploy')[0])
else:
print 'AutoDeployer: request Error'
self.write('Request Error!')
application = tornado.web.Application([
(r"/gohook", MainHandler),
])
if __name__ == "__main__":
print 'AutoDeployer: System Inited!'
application.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| mit |
naziris/HomeSecPi | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py | 2375 | 11628 | """Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
| apache-2.0 |
redhat-openstack/rally | tests/unit/common/objects/test_deploy.py | 10 | 9189 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for db.deploy layer."""
import mock
from rally.common import objects
from rally import consts
from tests.unit import test
class DeploymentTestCase(test.TestCase):
def setUp(self):
super(DeploymentTestCase, self).setUp()
self.deployment = {
"uuid": "baa1bfb6-0c38-4f6c-9bd0-45968890e4f4",
"name": "",
"config": {},
"endpoint": {},
"status": consts.DeployStatus.DEPLOY_INIT,
}
self.resource = {
"id": 42,
"deployment_uuid": self.deployment["uuid"],
"provider_name": "provider",
"type": "some",
"info": {"key": "value"},
}
@mock.patch("rally.common.objects.deploy.db.deployment_create")
def test_init_with_create(self, mock_deployment_create):
mock_deployment_create.return_value = self.deployment
deploy = objects.Deployment()
mock_deployment_create.assert_called_once_with({})
self.assertEqual(deploy["uuid"], self.deployment["uuid"])
@mock.patch("rally.common.objects.deploy.db.deployment_create")
def test_init_without_create(self, mock_deployment_create):
deploy = objects.Deployment(deployment=self.deployment)
self.assertFalse(mock_deployment_create.called)
self.assertEqual(deploy["uuid"], self.deployment["uuid"])
@mock.patch("rally.common.objects.deploy.db.deployment_get")
def test_get(self, mock_deployment_get):
mock_deployment_get.return_value = self.deployment
deploy = objects.Deployment.get(self.deployment["uuid"])
mock_deployment_get.assert_called_once_with(self.deployment["uuid"])
self.assertEqual(deploy["uuid"], self.deployment["uuid"])
@mock.patch("rally.common.objects.deploy.db.deployment_delete")
@mock.patch("rally.common.objects.deploy.db.deployment_create")
def test_create_and_delete(self, mock_deployment_create,
mock_deployment_delete):
mock_deployment_create.return_value = self.deployment
deploy = objects.Deployment()
deploy.delete()
mock_deployment_delete.assert_called_once_with(self.deployment["uuid"])
@mock.patch("rally.common.objects.deploy.db.deployment_delete")
def test_delete_by_uuid(self, mock_deployment_delete):
objects.Deployment.delete_by_uuid(self.deployment["uuid"])
mock_deployment_delete.assert_called_once_with(self.deployment["uuid"])
@mock.patch("rally.common.objects.deploy.db.deployment_update")
@mock.patch("rally.common.objects.deploy.db.deployment_create")
def test_update(self, mock_deployment_create, mock_deployment_update):
mock_deployment_create.return_value = self.deployment
mock_deployment_update.return_value = {"opt": "val2"}
deploy = objects.Deployment(opt="val1")
deploy._update({"opt": "val2"})
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"], {"opt": "val2"})
self.assertEqual(deploy["opt"], "val2")
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_status(self, mock_deployment_update):
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
deploy.update_status(consts.DeployStatus.DEPLOY_FAILED)
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"],
{"status": consts.DeployStatus.DEPLOY_FAILED},
)
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_name(self, mock_deployment_update):
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
deploy.update_name("new_name")
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"],
{"name": "new_name"},
)
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_config(self, mock_deployment_update):
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
deploy.update_config({"opt": "val"})
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"],
{"config": {"opt": "val"}},
)
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_endpoints(self, mock_deployment_update):
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
endpoints = {
"admin": objects.Endpoint("url", "user", "pwd", "tenant",
consts.EndpointPermission.ADMIN),
"users": [
objects.Endpoint("url1", "user1", "pwd1", "tenant1",
consts.EndpointPermission.USER),
objects.Endpoint("url2", "user2", "pwd2", "tenant2",
consts.EndpointPermission.USER),
]
}
expected_users = [u.to_dict(include_permission=True)
for u in endpoints["users"]]
deploy.update_endpoints(endpoints)
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"],
{
"admin": endpoints["admin"].to_dict(include_permission=True),
"users": expected_users
})
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_empty_endpoints(self, mock_deployment_update):
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
deploy.update_endpoints({})
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"], {"admin": {}, "users": []})
@mock.patch("rally.common.objects.deploy.db.resource_create")
def test_add_resource(self, mock_resource_create):
mock_resource_create.return_value = self.resource
deploy = objects.Deployment(deployment=self.deployment)
resource = deploy.add_resource("provider", type="some",
info={"key": "value"})
self.assertEqual(resource["id"], self.resource["id"])
mock_resource_create.assert_called_once_with({
"deployment_uuid": self.deployment["uuid"],
"provider_name": "provider",
"type": "some",
"info": {"key": "value"},
})
@mock.patch("rally.common.objects.task.db.resource_delete")
def test_delete(self, mock_resource_delete):
objects.Deployment.delete_resource(42)
mock_resource_delete.assert_called_once_with(42)
@mock.patch("rally.common.objects.task.db.resource_get_all")
def test_get_resources(self, mock_resource_get_all):
mock_resource_get_all.return_value = [self.resource]
deploy = objects.Deployment(deployment=self.deployment)
resources = deploy.get_resources(provider_name="provider", type="some")
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["id"], self.resource["id"])
@mock.patch("rally.common.objects.deploy.datetime.datetime")
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_set_started(self, mock_deployment_update, mock_datetime):
mock_datetime.now = mock.Mock(return_value="fake_time")
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
deploy.set_started()
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"],
{"started_at": "fake_time",
"status": consts.DeployStatus.DEPLOY_STARTED}
)
@mock.patch("rally.common.objects.deploy.datetime.datetime")
@mock.patch("rally.common.objects.deploy.db.deployment_update")
def test_update_set_completed(self, mock_deployment_update, mock_datetime):
mock_datetime.now = mock.Mock(return_value="fake_time")
mock_deployment_update.return_value = self.deployment
deploy = objects.Deployment(deployment=self.deployment)
deploy.set_completed()
mock_deployment_update.assert_called_once_with(
self.deployment["uuid"],
{"completed_at": "fake_time",
"status": consts.DeployStatus.DEPLOY_FINISHED}
)
| apache-2.0 |
munnerz/CouchPotatoServer | couchpotato/core/media/movie/providers/automation/popularmovies.py | 17 | 1465 | from couchpotato import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'PopularMovies'
class PopularMovies(Automation):
interval = 1800
url = 'https://s3.amazonaws.com/popular-movies/movies.json'
def getIMDBids(self):
movies = []
retrieved_movies = self.getJsonData(self.url)
if retrieved_movies:
for movie in retrieved_movies:
imdb_id = movie.get('imdb_id')
info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True)
if self.isMinimalMovie(info):
movies.append(imdb_id)
return movies
config = [{
'name': 'popularmovies',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'popularmovies_automation',
'label': 'Popular Movies',
'description': 'Imports the <a href="http://movies.stevenlu.com/" target="_blank">top titles of movies that have been in theaters</a>. Script provided by <a href="https://github.com/sjlu/popular-movies" target="_blank">Steven Lu</a>',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
],
},
],
}]
| gpl-3.0 |
mezz64/home-assistant | homeassistant/components/demo/air_quality.py | 21 | 1585 | """Demo platform that offers fake air quality data."""
from homeassistant.components.air_quality import AirQualityEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Air Quality."""
async_add_entities(
[DemoAirQuality("Home", 14, 23, 100), DemoAirQuality("Office", 4, 16, None)]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoAirQuality(AirQualityEntity):
"""Representation of Air Quality data."""
def __init__(self, name, pm_2_5, pm_10, n2o):
"""Initialize the Demo Air Quality."""
self._name = name
self._pm_2_5 = pm_2_5
self._pm_10 = pm_10
self._n2o = n2o
@property
def name(self):
"""Return the name of the sensor."""
return f"Demo Air Quality {self._name}"
@property
def should_poll(self):
"""No polling needed for Demo Air Quality."""
return False
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._pm_2_5
@property
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self._pm_10
@property
def nitrogen_oxide(self):
"""Return the nitrogen oxide (N2O) level."""
return self._n2o
@property
def attribution(self):
"""Return the attribution."""
return "Powered by Home Assistant"
| apache-2.0 |
ArvinPan/pyzmq | zmq/auth/base.py | 7 | 10384 | """Base implementation of 0MQ authentication."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import logging
import zmq
from zmq.utils import z85
from zmq.utils.strtypes import bytes, unicode, b, u
from zmq.error import _check_version
from .certs import load_certificates
CURVE_ALLOW_ANY = '*'
VERSION = b'1.0'
class Authenticator(object):
"""Implementation of ZAP authentication for zmq connections.
Note:
- libzmq provides four levels of security: default NULL (which the Authenticator does
not see), and authenticated NULL, PLAIN, and CURVE, which the Authenticator can see.
- until you add policies, all incoming NULL connections are allowed
(classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied.
"""
def __init__(self, context=None, encoding='utf-8', log=None):
_check_version((4,0), "security")
self.context = context or zmq.Context.instance()
self.encoding = encoding
self.allow_any = False
self.zap_socket = None
self.whitelist = set()
self.blacklist = set()
# passwords is a dict keyed by domain and contains values
# of dicts with username:password pairs.
self.passwords = {}
# certs is dict keyed by domain and contains values
# of dicts keyed by the public keys from the specified location.
self.certs = {}
self.log = log or logging.getLogger('zmq.auth')
def start(self):
"""Create and bind the ZAP socket"""
self.zap_socket = self.context.socket(zmq.REP)
self.zap_socket.linger = 1
self.zap_socket.bind("inproc://zeromq.zap.01")
def stop(self):
"""Close the ZAP socket"""
if self.zap_socket:
self.zap_socket.close()
self.zap_socket = None
def allow(self, *addresses):
"""Allow (whitelist) IP address(es).
Connections from addresses not in the whitelist will be rejected.
- For NULL, all clients from this address will be accepted.
- For PLAIN and CURVE, they will be allowed to continue with authentication.
whitelist is mutually exclusive with blacklist.
"""
if self.blacklist:
raise ValueError("Only use a whitelist or a blacklist, not both")
self.whitelist.update(addresses)
def deny(self, *addresses):
"""Deny (blacklist) IP address(es).
Addresses not in the blacklist will be allowed to continue with authentication.
Blacklist is mutually exclusive with whitelist.
"""
if self.whitelist:
raise ValueError("Only use a whitelist or a blacklist, not both")
self.blacklist.update(addresses)
def configure_plain(self, domain='*', passwords=None):
"""Configure PLAIN authentication for a given domain.
PLAIN authentication uses a plain-text password file.
To cover all domains, use "*".
You can modify the password file at any time; it is reloaded automatically.
"""
if passwords:
self.passwords[domain] = passwords
def configure_curve(self, domain='*', location=None):
"""Configure CURVE authentication for a given domain.
CURVE authentication uses a directory that holds all public client certificates,
i.e. their public keys.
To cover all domains, use "*".
You can add and remove certificates in that directory at any time.
To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
"""
# If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
# treat location as a directory that holds the certificates.
if location == CURVE_ALLOW_ANY:
self.allow_any = True
else:
self.allow_any = False
try:
self.certs[domain] = load_certificates(location)
except Exception as e:
self.log.error("Failed to load CURVE certs from %s: %s", location, e)
def handle_zap_message(self, msg):
"""Perform ZAP authentication"""
if len(msg) < 6:
self.log.error("Invalid ZAP message, not enough frames: %r", msg)
if len(msg) < 2:
self.log.error("Not enough information to reply")
else:
self._send_zap_reply(msg[1], b"400", b"Not enough frames")
return
version, request_id, domain, address, identity, mechanism = msg[:6]
credentials = msg[6:]
domain = u(domain, self.encoding, 'replace')
address = u(address, self.encoding, 'replace')
if (version != VERSION):
self.log.error("Invalid ZAP version: %r", msg)
self._send_zap_reply(request_id, b"400", b"Invalid version")
return
self.log.debug("version: %r, request_id: %r, domain: %r,"
" address: %r, identity: %r, mechanism: %r",
version, request_id, domain,
address, identity, mechanism,
)
# Is address is explicitly whitelisted or blacklisted?
allowed = False
denied = False
reason = b"NO ACCESS"
if self.whitelist:
if address in self.whitelist:
allowed = True
self.log.debug("PASSED (whitelist) address=%s", address)
else:
denied = True
reason = b"Address not in whitelist"
self.log.debug("DENIED (not in whitelist) address=%s", address)
elif self.blacklist:
if address in self.blacklist:
denied = True
reason = b"Address is blacklisted"
self.log.debug("DENIED (blacklist) address=%s", address)
else:
allowed = True
self.log.debug("PASSED (not in blacklist) address=%s", address)
# Perform authentication mechanism-specific checks if necessary
username = u("user")
if not denied:
if mechanism == b'NULL' and not allowed:
# For NULL, we allow if the address wasn't blacklisted
self.log.debug("ALLOWED (NULL)")
allowed = True
elif mechanism == b'PLAIN':
# For PLAIN, even a whitelisted address must authenticate
if len(credentials) != 2:
self.log.error("Invalid PLAIN credentials: %r", credentials)
self._send_zap_reply(request_id, b"400", b"Invalid credentials")
return
username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
allowed, reason = self._authenticate_plain(domain, username, password)
elif mechanism == b'CURVE':
# For CURVE, even a whitelisted address must authenticate
if len(credentials) != 1:
self.log.error("Invalid CURVE credentials: %r", credentials)
self._send_zap_reply(request_id, b"400", b"Invalid credentials")
return
key = credentials[0]
allowed, reason = self._authenticate_curve(domain, key)
if allowed:
self._send_zap_reply(request_id, b"200", b"OK", username)
else:
self._send_zap_reply(request_id, b"400", reason)
def _authenticate_plain(self, domain, username, password):
"""PLAIN ZAP authentication"""
allowed = False
reason = b""
if self.passwords:
# If no domain is not specified then use the default domain
if not domain:
domain = '*'
if domain in self.passwords:
if username in self.passwords[domain]:
if password == self.passwords[domain][username]:
allowed = True
else:
reason = b"Invalid password"
else:
reason = b"Invalid username"
else:
reason = b"Invalid domain"
if allowed:
self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s",
domain, username, password,
)
else:
self.log.debug("DENIED %s", reason)
else:
reason = b"No passwords defined"
self.log.debug("DENIED (PLAIN) %s", reason)
return allowed, reason
def _authenticate_curve(self, domain, client_key):
"""CURVE ZAP authentication"""
allowed = False
reason = b""
if self.allow_any:
allowed = True
reason = b"OK"
self.log.debug("ALLOWED (CURVE allow any client)")
else:
# If no explicit domain is specified then use the default domain
if not domain:
domain = '*'
if domain in self.certs:
# The certs dict stores keys in z85 format, convert binary key to z85 bytes
z85_client_key = z85.encode(client_key)
if self.certs[domain].get(z85_client_key):
allowed = True
reason = b"OK"
else:
reason = b"Unknown key"
status = "ALLOWED" if allowed else "DENIED"
self.log.debug("%s (CURVE) domain=%s client_key=%s",
status, domain, z85_client_key,
)
else:
reason = b"Unknown domain"
return allowed, reason
def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'):
"""Send a ZAP reply to finish the authentication."""
user_id = user_id if status_code == b'200' else b''
if isinstance(user_id, unicode):
user_id = user_id.encode(self.encoding, 'replace')
metadata = b'' # not currently used
self.log.debug("ZAP reply code=%s text=%s", status_code, status_text)
reply = [VERSION, request_id, status_code, status_text, user_id, metadata]
self.zap_socket.send_multipart(reply)
__all__ = ['Authenticator', 'CURVE_ALLOW_ANY']
| bsd-3-clause |
js0701/chromium-crosswalk | tools/perf/page_sets/polymer.py | 33 | 10354 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class PolymerPage(page_module.Page):
def __init__(self, url, page_set, run_no_page_interactions):
""" Base class for all polymer pages.
Args:
run_no_page_interactions: whether the page will run any interactions after
navigate steps.
"""
super(PolymerPage, self).__init__(
url=url,
shared_page_state_class=shared_page_state.SharedMobilePageState,
page_set=page_set)
self.script_to_evaluate_on_commit = '''
document.addEventListener("polymer-ready", function() {
window.__polymer_ready = true;
});
'''
self._run_no_page_interactions = run_no_page_interactions
def RunPageInteractions(self, action_runner):
# If a polymer page wants to customize its actions, it should
# override the PerformPageInteractions method instead of this method.
if self._run_no_page_interactions:
return
self.PerformPageInteractions(action_runner)
def PerformPageInteractions(self, action_runner):
""" Override this to perform actions after the page has navigated. """
pass
def RunNavigateSteps(self, action_runner):
super(PolymerPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.__polymer_ready')
class PolymerCalculatorPage(PolymerPage):
def __init__(self, page_set, run_no_page_interactions):
super(PolymerCalculatorPage, self).__init__(
url=('http://www.polymer-project.org/components/paper-calculator/'
'demo.html'),
page_set=page_set, run_no_page_interactions=run_no_page_interactions)
def PerformPageInteractions(self, action_runner):
self.TapButton(action_runner)
self.SlidePanel(action_runner)
def TapButton(self, action_runner):
with action_runner.CreateInteraction('PolymerAnimation', repeatable=True):
action_runner.TapElement(element_function='''
document.querySelector(
'body /deep/ #outerPanels'
).querySelector(
'#standard'
).shadowRoot.querySelector(
'paper-calculator-key[label="5"]'
)''')
action_runner.Wait(2)
def SlidePanel(self, action_runner):
# only bother with this interaction if the drawer is hidden
opened = action_runner.EvaluateJavaScript('''
(function() {
var outer = document.querySelector("body /deep/ #outerPanels");
return outer.opened || outer.wideMode;
}());''')
if not opened:
with action_runner.CreateInteraction('PolymerAnimation', repeatable=True):
action_runner.SwipeElement(
left_start_ratio=0.1, top_start_ratio=0.2,
direction='left', distance=300, speed_in_pixels_per_second=5000,
element_function='''
document.querySelector(
'body /deep/ #outerPanels'
).querySelector(
'#advanced'
).shadowRoot.querySelector(
'.handle-bar'
)''')
action_runner.WaitForJavaScriptCondition('''
var outer = document.querySelector("body /deep/ #outerPanels");
outer.opened || outer.wideMode;''')
class PolymerShadowPage(PolymerPage):
def __init__(self, page_set, run_no_page_interactions):
super(PolymerShadowPage, self).__init__(
url='http://www.polymer-project.org/components/paper-shadow/demo.html',
page_set=page_set, run_no_page_interactions=run_no_page_interactions)
def PerformPageInteractions(self, action_runner):
with action_runner.CreateInteraction('ScrollAndShadowAnimation'):
action_runner.ExecuteJavaScript(
"document.getElementById('fab').scrollIntoView()")
action_runner.Wait(5)
self.AnimateShadow(action_runner, 'card')
#FIXME(wiltzius) disabling until this issue is fixed:
# https://github.com/Polymer/paper-shadow/issues/12
#self.AnimateShadow(action_runner, 'fab')
def AnimateShadow(self, action_runner, eid):
for i in range(1, 6):
action_runner.ExecuteJavaScript(
'document.getElementById("{0}").z = {1}'.format(eid, i))
action_runner.Wait(1)
class PolymerSampler(PolymerPage):
def __init__(self, page_set, anchor, run_no_page_interactions,
scrolling_page=False):
"""Page exercising interactions with a single Paper Sampler subpage.
Args:
page_set: Page set to inforporate this page into.
anchor: string indicating which subpage to load (matches the element
type that page is displaying)
scrolling_page: Whether scrolling the content pane is relevant to this
content page or not.
"""
super(PolymerSampler, self).__init__(
url=('http://www.polymer-project.org/components/%s/demo.html' % anchor),
page_set=page_set, run_no_page_interactions=run_no_page_interactions)
self.scrolling_page = scrolling_page
self.iframe_js = 'document'
def RunNavigateSteps(self, action_runner):
super(PolymerSampler, self).RunNavigateSteps(action_runner)
waitForLoadJS = """
window.Polymer.whenPolymerReady(function() {
%s.contentWindow.Polymer.whenPolymerReady(function() {
window.__polymer_ready = true;
})
});
""" % self.iframe_js
action_runner.ExecuteJavaScript(waitForLoadJS)
action_runner.WaitForJavaScriptCondition(
'window.__polymer_ready')
def PerformPageInteractions(self, action_runner):
#TODO(wiltzius) Add interactions for input elements and shadow pages
if self.scrolling_page:
# Only bother scrolling the page if its been marked as worthwhile
self.ScrollContentPane(action_runner)
self.TouchEverything(action_runner)
def ScrollContentPane(self, action_runner):
element_function = (self.iframe_js + '.querySelector('
'"core-scroll-header-panel").$.mainContainer')
with action_runner.CreateInteraction('Scroll_Page', repeatable=True):
action_runner.ScrollElement(use_touch=True,
direction='down',
distance='900',
element_function=element_function)
with action_runner.CreateInteraction('Scroll_Page', repeatable=True):
action_runner.ScrollElement(use_touch=True,
direction='up',
distance='900',
element_function=element_function)
def TouchEverything(self, action_runner):
tappable_types = [
'paper-button',
'paper-checkbox',
'paper-fab',
'paper-icon-button',
# crbug.com/394756
# 'paper-radio-button',
'paper-tab',
'paper-toggle-button',
'x-shadow',
]
for tappable_type in tappable_types:
self.DoActionOnWidgetType(action_runner, tappable_type, self.TapWidget)
swipeable_types = ['paper-slider']
for swipeable_type in swipeable_types:
self.DoActionOnWidgetType(action_runner, swipeable_type, self.SwipeWidget)
def DoActionOnWidgetType(self, action_runner, widget_type, action_function):
# Find all widgets of this type, but skip any that are disabled or are
# currently active as they typically don't produce animation frames.
element_list_query = (self.iframe_js +
('.querySelectorAll("body %s:not([disabled]):'
'not([active])")' % widget_type))
roles_count_query = element_list_query + '.length'
for i in range(action_runner.EvaluateJavaScript(roles_count_query)):
element_query = element_list_query + ("[%d]" % i)
if action_runner.EvaluateJavaScript(
element_query + '.offsetParent != null'):
# Only try to tap on visible elements (offsetParent != null)
action_runner.ExecuteJavaScript(element_query + '.scrollIntoView()')
action_runner.Wait(1) # wait for page to settle after scrolling
action_function(action_runner, element_query)
def TapWidget(self, action_runner, element_function):
with action_runner.CreateInteraction('Tap_Widget', repeatable=True):
action_runner.TapElement(element_function=element_function)
action_runner.Wait(1) # wait for e.g. animations on the widget
def SwipeWidget(self, action_runner, element_function):
with action_runner.CreateInteraction('Swipe_Widget'):
action_runner.SwipeElement(element_function=element_function,
left_start_ratio=0.75,
speed_in_pixels_per_second=300)
class PolymerPageSet(story.StorySet):
def __init__(self, run_no_page_interactions=False):
super(PolymerPageSet, self).__init__(
archive_data_file='data/polymer.json',
cloud_storage_bucket=story.PUBLIC_BUCKET)
self.AddStory(PolymerCalculatorPage(self, run_no_page_interactions))
self.AddStory(PolymerShadowPage(self, run_no_page_interactions))
# Polymer Sampler subpages that are interesting to tap / swipe elements on
TAPPABLE_PAGES = [
'paper-button',
'paper-checkbox',
'paper-fab',
'paper-icon-button',
# crbug.com/394756
# 'paper-radio-button',
#FIXME(wiltzius) Disabling x-shadow until this issue is fixed:
# https://github.com/Polymer/paper-shadow/issues/12
#'paper-shadow',
'paper-tabs',
'paper-toggle-button',
]
for p in TAPPABLE_PAGES:
self.AddStory(PolymerSampler(
self, p, run_no_page_interactions=run_no_page_interactions))
# Polymer Sampler subpages that are interesting to scroll
SCROLLABLE_PAGES = [
'core-scroll-header-panel',
]
for p in SCROLLABLE_PAGES:
self.AddStory(PolymerSampler(
self, p, run_no_page_interactions=run_no_page_interactions,
scrolling_page=True))
for page in self:
assert (page.__class__.RunPageInteractions ==
PolymerPage.RunPageInteractions), (
'Pages in this page set must not override PolymerPage\' '
'RunPageInteractions method.')
| bsd-3-clause |
citrix-openstack-build/nova | nova/openstack/common/rootwrap/cmd.py | 13 | 4847 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Root wrapper for OpenStack services
Filters which commands a service is allowed to run as another user.
To use this with nova, you should set the following in
nova.conf:
rootwrap_config=/etc/nova/rootwrap.conf
You also need to let the nova user run nova-rootwrap
as root in sudoers:
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap
/etc/nova/rootwrap.conf *
Service packaging should deploy .filters files only on nodes where
they are needed, to avoid allowing more than is necessary.
"""
from __future__ import print_function
import ConfigParser
import logging
import os
import pwd
import signal
import subprocess
import sys
RC_UNAUTHORIZED = 99
RC_NOCOMMAND = 98
RC_BADCONFIG = 97
RC_NOEXECFOUND = 96
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _exit_error(execname, message, errorcode, log=True):
print("%s: %s" % (execname, message), file=sys.stderr)
if log:
logging.error(message)
sys.exit(errorcode)
def _getlogin():
try:
return os.getlogin()
except OSError:
return (os.getenv('USER') or
os.getenv('USERNAME') or
os.getenv('LOGNAME'))
def main():
# Split arguments, require at least a command
execname = sys.argv.pop(0)
if len(sys.argv) < 2:
_exit_error(execname, "No command specified", RC_NOCOMMAND, log=False)
configfile = sys.argv.pop(0)
userargs = sys.argv[:]
# Add ../ to sys.path to allow running from branch
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname),
os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
from nova.openstack.common.rootwrap import wrapper
# Load configuration
try:
rawconfig = ConfigParser.RawConfigParser()
rawconfig.read(configfile)
config = wrapper.RootwrapConfig(rawconfig)
except ValueError as exc:
msg = "Incorrect value in %s: %s" % (configfile, exc.message)
_exit_error(execname, msg, RC_BADCONFIG, log=False)
except ConfigParser.Error:
_exit_error(execname, "Incorrect configuration file: %s" % configfile,
RC_BADCONFIG, log=False)
if config.use_syslog:
wrapper.setup_syslog(execname,
config.syslog_log_facility,
config.syslog_log_level)
# Execute command if it matches any of the loaded filters
filters = wrapper.load_filters(config.filters_path)
try:
filtermatch = wrapper.match_filter(filters, userargs,
exec_dirs=config.exec_dirs)
if filtermatch:
command = filtermatch.get_command(userargs,
exec_dirs=config.exec_dirs)
if config.use_syslog:
logging.info("(%s > %s) Executing %s (filter match = %s)" % (
_getlogin(), pwd.getpwuid(os.getuid())[0],
command, filtermatch.name))
obj = subprocess.Popen(command,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
preexec_fn=_subprocess_setup,
env=filtermatch.get_environment(userargs))
obj.wait()
sys.exit(obj.returncode)
except wrapper.FilterMatchNotExecutable as exc:
msg = ("Executable not found: %s (filter match = %s)"
% (exc.match.exec_path, exc.match.name))
_exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
except wrapper.NoFilterMatched:
msg = ("Unauthorized command: %s (no filter matched)"
% ' '.join(userargs))
_exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
| apache-2.0 |
s-store/sstore-soft | third_party/python/boto/cloudfront/identity.py | 47 | 4489 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
class OriginAccessIdentity:
def __init__(self, connection=None, config=None, id='',
s3_user_id='', comment=''):
self.connection = connection
self.config = config
self.id = id
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
if name == 'CloudFrontOriginAccessIdentityConfig':
self.config = OriginAccessIdentityConfig()
return self.config
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'S3CanonicalUserId':
self.s3_user_id = value
elif name == 'Comment':
self.comment = value
else:
setattr(self, name, value)
def update(self, comment=None):
new_config = OriginAccessIdentityConfig(self.connection,
self.config.caller_reference,
self.config.comment)
if comment != None:
new_config.comment = comment
self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
self.config = new_config
def delete(self):
return self.connection.delete_origin_access_identity(self.id, self.etag)
def uri(self):
return 'origin-access-identity/cloudfront/%s' % self.id
class OriginAccessIdentityConfig:
def __init__(self, connection=None, caller_reference='', comment=''):
self.connection = connection
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.comment = comment
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n'
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += '</CloudFrontOriginAccessIdentityConfig>\n'
return s
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Comment':
self.comment = value
elif name == 'CallerReference':
self.caller_reference = value
else:
setattr(self, name, value)
class OriginAccessIdentitySummary:
def __init__(self, connection=None, id='',
s3_user_id='', comment=''):
self.connection = connection
self.id = id
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'S3CanonicalUserId':
self.s3_user_id = value
elif name == 'Comment':
self.comment = value
else:
setattr(self, name, value)
def get_origin_access_identity(self):
return self.connection.get_origin_access_identity_info(self.id)
| gpl-3.0 |
dwadler/QGIS | python/plugins/processing/gui/BatchPanel.py | 3 | 13573 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BatchPanel.py
---------------------
Date : November 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import json
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QTableWidgetItem, QComboBox, QHeaderView, QFileDialog, QMessageBox
from qgis.PyQt.QtCore import QDir, QFileInfo
from qgis.core import (Qgis,
QgsApplication,
QgsSettings,
QgsProcessingParameterDefinition)
from qgis.gui import QgsProcessingParameterWidgetContext
from qgis.utils import iface
from processing.gui.wrappers import WidgetWrapperFactory, WidgetWrapper
from processing.gui.BatchOutputSelectionPanel import BatchOutputSelectionPanel
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBatchPanel.ui'))
class BatchPanel(BASE, WIDGET):
PARAMETERS = "PARAMETERS"
OUTPUTS = "OUTPUTS"
def __init__(self, parent, alg):
super(BatchPanel, self).__init__(None)
self.setupUi(self)
self.wrappers = []
self.btnAdvanced.hide()
# Set icons
self.btnAdd.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
self.btnRemove.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
self.btnOpen.setIcon(QgsApplication.getThemeIcon('/mActionFileOpen.svg'))
self.btnSave.setIcon(QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnAdvanced.setIcon(QgsApplication.getThemeIcon("/processingAlgorithm.svg"))
self.alg = alg
self.parent = parent
self.btnAdd.clicked.connect(self.addRow)
self.btnRemove.clicked.connect(self.removeRows)
self.btnOpen.clicked.connect(self.load)
self.btnSave.clicked.connect(self.save)
self.btnAdvanced.toggled.connect(self.toggleAdvancedMode)
self.tblParameters.horizontalHeader().sectionDoubleClicked.connect(
self.fillParameterValues)
self.tblParameters.horizontalHeader().resizeSections(QHeaderView.ResizeToContents)
self.tblParameters.horizontalHeader().setDefaultSectionSize(250)
self.tblParameters.horizontalHeader().setMinimumSectionSize(150)
self.initWidgets()
def layerRegistryChanged(self):
pass
def initWidgets(self):
# If there are advanced parameters — show corresponding button
for param in self.alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
self.btnAdvanced.show()
break
# Determine column count
nOutputs = len(self.alg.destinationParameterDefinitions()) + 1
if nOutputs == 1:
nOutputs = 0
self.tblParameters.setColumnCount(
self.alg.countVisibleParameters())
# Table headers
column = 0
for param in self.alg.parameterDefinitions():
if param.isDestination():
continue
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(param.description()))
if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
self.tblParameters.setColumnHidden(column, True)
column += 1
for out in self.alg.destinationParameterDefinitions():
if not out.flags() & QgsProcessingParameterDefinition.FlagHidden:
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(out.description()))
column += 1
# Last column for indicating if output will be added to canvas
if len(self.alg.destinationParameterDefinitions()) > 0:
self.tblParameters.setHorizontalHeaderItem(
column, QTableWidgetItem(self.tr('Load in QGIS')))
# Add an empty row to begin
self.addRow()
self.tblParameters.horizontalHeader().resizeSections(QHeaderView.ResizeToContents)
self.tblParameters.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tblParameters.horizontalHeader().setStretchLastSection(True)
def load(self):
context = dataobjects.createContext()
settings = QgsSettings()
last_path = settings.value("/Processing/LastBatchPath", QDir.homePath())
filename, selected_filter = QFileDialog.getOpenFileName(self,
self.tr('Open Batch'), last_path,
self.tr('JSON files (*.json)'))
if filename:
last_path = QFileInfo(filename).path()
settings.setValue('/Processing/LastBatchPath', last_path)
with open(filename) as f:
values = json.load(f)
else:
# If the user clicked on the cancel button.
return
self.tblParameters.setRowCount(0)
try:
for row, alg in enumerate(values):
self.addRow()
params = alg[self.PARAMETERS]
outputs = alg[self.OUTPUTS]
column = 0
for param in self.alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
if param.isDestination():
continue
if param.name() in params:
value = eval(params[param.name()])
wrapper = self.wrappers[row][column]
wrapper.setParameterValue(value, context)
column += 1
for out in self.alg.destinationParameterDefinitions():
if out.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
if out.name() in outputs:
value = outputs[out.name()].strip("'")
widget = self.tblParameters.cellWidget(row, column)
widget.setValue(value)
column += 1
except TypeError:
QMessageBox.critical(
self,
self.tr('Error'),
self.tr('An error occurred while reading your file.'))
def save(self):
toSave = []
context = dataobjects.createContext()
for row in range(self.tblParameters.rowCount()):
algParams = {}
algOutputs = {}
col = 0
alg = self.alg
for param in alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
if param.isDestination():
continue
wrapper = self.wrappers[row][col]
# For compatibility with 3.x API, we need to check whether the wrapper is
# the deprecated WidgetWrapper class. If not, it's the newer
# QgsAbstractProcessingParameterWidgetWrapper class
# TODO QGIS 4.0 - remove
if issubclass(wrapper.__class__, WidgetWrapper):
widget = wrapper.widget
else:
widget = wrapper.wrappedWidget()
value = wrapper.parameterValue()
if not param.checkValueIsAcceptable(value, context):
self.parent.messageBar().pushMessage("", self.tr('Wrong or missing parameter value: {0} (row {1})').format(
param.description(), row + 1),
level=Qgis.Warning, duration=5)
return
algParams[param.name()] = param.valueAsPythonString(value, context)
col += 1
for out in alg.destinationParameterDefinitions():
if out.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
widget = self.tblParameters.cellWidget(row, col)
text = widget.getValue()
if text.strip() != '':
algOutputs[out.name()] = text.strip()
col += 1
else:
self.parent.messageBar().pushMessage("", self.tr('Wrong or missing output value: {0} (row {1})').format(
out.description(), row + 1),
level=Qgis.Warning, duration=5)
return
toSave.append({self.PARAMETERS: algParams, self.OUTPUTS: algOutputs})
settings = QgsSettings()
last_path = settings.value("/Processing/LastBatchPath", QDir.homePath())
filename, __ = QFileDialog.getSaveFileName(self,
self.tr('Save Batch'),
last_path,
self.tr('JSON files (*.json)'))
if filename:
if not filename.endswith('.json'):
filename += '.json'
last_path = QFileInfo(filename).path()
settings.setValue('/Processing/LastBatchPath', last_path)
with open(filename, 'w') as f:
json.dump(toSave, f)
def setCellWrapper(self, row, column, wrapper, context):
self.wrappers[row][column] = wrapper
# For compatibility with 3.x API, we need to check whether the wrapper is
# the deprecated WidgetWrapper class. If not, it's the newer
# QgsAbstractProcessingParameterWidgetWrapper class
# TODO QGIS 4.0 - remove
is_cpp_wrapper = not issubclass(wrapper.__class__, WidgetWrapper)
if is_cpp_wrapper:
widget_context = QgsProcessingParameterWidgetContext()
if iface is not None:
widget_context.setMapCanvas(iface.mapCanvas())
wrapper.setWidgetContext(widget_context)
widget = wrapper.createWrappedWidget(context)
else:
widget = wrapper.widget
self.tblParameters.setCellWidget(row, column, widget)
def addRow(self):
self.wrappers.append([None] * self.tblParameters.columnCount())
self.tblParameters.setRowCount(self.tblParameters.rowCount() + 1)
context = dataobjects.createContext()
wrappers = {}
row = self.tblParameters.rowCount() - 1
column = 0
for param in self.alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagHidden or param.isDestination():
continue
wrapper = WidgetWrapperFactory.create_wrapper(param, self.parent, row, column)
wrappers[param.name()] = wrapper
self.setCellWrapper(row, column, wrapper, context)
column += 1
for out in self.alg.destinationParameterDefinitions():
if out.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
self.tblParameters.setCellWidget(
row, column, BatchOutputSelectionPanel(
out, self.alg, row, column, self))
column += 1
if len(self.alg.destinationParameterDefinitions()) > 0:
item = QComboBox()
item.addItem(self.tr('Yes'))
item.addItem(self.tr('No'))
item.setCurrentIndex(0)
self.tblParameters.setCellWidget(row, column, item)
for wrapper in list(wrappers.values()):
wrapper.postInitialize(list(wrappers.values()))
def removeRows(self):
if self.tblParameters.rowCount() > 1:
self.wrappers.pop()
self.tblParameters.setRowCount(self.tblParameters.rowCount() - 1)
def fillParameterValues(self, column):
context = dataobjects.createContext()
wrapper = self.wrappers[0][column]
if wrapper is None:
# e.g. double clicking on a destination header
return
for row in range(1, self.tblParameters.rowCount()):
self.wrappers[row][column].setParameterValue(wrapper.parameterValue(), context)
def toggleAdvancedMode(self, checked):
for column, param in enumerate(self.alg.parameterDefinitions()):
if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
self.tblParameters.setColumnHidden(column, not checked)
| gpl-2.0 |
OSSESAC/odoopubarquiluz | addons/note/tests/__init__.py | 159 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_note
checks = [
test_note,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mancoast/CPythonPyc_test | cpython/235_test_quopri.py | 24 | 5562 | from test import test_support
import unittest
from cStringIO import StringIO
from quopri import *
ENCSAMPLE = """\
Here's a bunch of special=20
=A1=A2=A3=A4=A5=A6=A7=A8=A9
=AA=AB=AC=AD=AE=AF=B0=B1=B2=B3
=B4=B5=B6=B7=B8=B9=BA=BB=BC=BD=BE
=BF=C0=C1=C2=C3=C4=C5=C6
=C7=C8=C9=CA=CB=CC=CD=CE=CF
=D0=D1=D2=D3=D4=D5=D6=D7
=D8=D9=DA=DB=DC=DD=DE=DF
=E0=E1=E2=E3=E4=E5=E6=E7
=E8=E9=EA=EB=EC=ED=EE=EF
=F0=F1=F2=F3=F4=F5=F6=F7
=F8=F9=FA=FB=FC=FD=FE=FF
characters... have fun!
"""
# First line ends with a space
DECSAMPLE = "Here's a bunch of special \n" + \
"""\
\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9
\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3
\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe
\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6
\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf
\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7
\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf
\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7
\xe8\xe9\xea\xeb\xec\xed\xee\xef
\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7
\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff
characters... have fun!
"""
class QuopriTestCase(unittest.TestCase):
# Each entry is a tuple of (plaintext, encoded string). These strings are
# used in the "quotetabs=0" tests.
STRINGS = (
# Some normal strings
('hello', 'hello'),
('''hello
there
world''', '''hello
there
world'''),
('''hello
there
world
''', '''hello
there
world
'''),
('\201\202\203', '=81=82=83'),
# Add some trailing MUST QUOTE strings
('hello ', 'hello=20'),
('hello\t', 'hello=09'),
# Some long lines. First, a single line of 108 characters
('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\xd8\xd9\xda\xdb\xdc\xdd\xde\xdfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
'''xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=D8=D9=DA=DB=DC=DD=DE=DFx=
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'''),
# A line of exactly 76 characters, no soft line break should be needed
('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',
'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
# A line of 77 characters, forcing a soft line break at position 75,
# and a second line of exactly 2 characters (because the soft line
# break `=' sign counts against the line length limit).
('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zz'''),
# A line of 151 characters, forcing a soft line break at position 75,
# with a second line of exactly 76 characters and no trailing =
('zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# A string containing a hard line break, but which the first line is
# 151 characters and the second line is exactly 76 characters. This
# should leave us with three lines, the first which has a soft line
# break, and which the second and third do not.
('''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''',
'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# Now some really complex stuff ;)
(DECSAMPLE, ENCSAMPLE),
)
# These are used in the "quotetabs=1" tests.
ESTRINGS = (
('hello world', 'hello=20world'),
('hello\tworld', 'hello=09world'),
)
# These are used in the "header=1" tests.
HSTRINGS = (
('hello world', 'hello_world'),
('hello_world', 'hello=5Fworld'),
)
def test_encodestring(self):
for p, e in self.STRINGS:
self.assert_(encodestring(p) == e)
def test_decodestring(self):
for p, e in self.STRINGS:
self.assert_(decodestring(e) == p)
def test_idempotent_string(self):
for p, e in self.STRINGS:
self.assert_(decodestring(encodestring(e)) == e)
def test_encode(self):
for p, e in self.STRINGS:
infp = StringIO(p)
outfp = StringIO()
encode(infp, outfp, quotetabs=0)
self.assert_(outfp.getvalue() == e)
def test_decode(self):
for p, e in self.STRINGS:
infp = StringIO(e)
outfp = StringIO()
decode(infp, outfp)
self.assert_(outfp.getvalue() == p)
def test_embedded_ws(self):
for p, e in self.ESTRINGS:
self.assert_(encodestring(p, quotetabs=1) == e)
self.assert_(decodestring(e) == p)
def test_encode_header(self):
for p, e in self.HSTRINGS:
self.assert_(encodestring(p, header = 1) == e)
def test_decode_header(self):
for p, e in self.HSTRINGS:
self.assert_(decodestring(e, header = 1) == p)
def test_main():
test_support.run_unittest(QuopriTestCase)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
sils1297/PyGithub | github/IssueEvent.py | 74 | 4698 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.Issue
import github.NamedUser
class IssueEvent(github.GithubObject.CompletableGithubObject):
"""
This class represents IssueEvents as returned for example by http://developer.github.com/v3/todo
"""
@property
def actor(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._actor)
return self._actor.value
@property
def commit_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._commit_id)
return self._commit_id.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def event(self):
"""
:type: string
"""
self._completeIfNotSet(self._event)
return self._event.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue(self):
"""
:type: :class:`github.Issue.Issue`
"""
self._completeIfNotSet(self._issue)
return self._issue.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def _initAttributes(self):
self._actor = github.GithubObject.NotSet
self._commit_id = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._event = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._issue = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "actor" in attributes: # pragma no branch
self._actor = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["actor"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "event" in attributes: # pragma no branch
self._event = self._makeStringAttribute(attributes["event"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue" in attributes: # pragma no branch
self._issue = self._makeClassAttribute(github.Issue.Issue, attributes["issue"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
jlopp/statoshi | test/functional/rpc_getblockstats.py | 34 | 6874 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(BitcoinTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure not valid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| mit |
ralphwort/chef-repo | build/pyparsing/examples/scanExamples.py | 6 | 2363 | #
# scanExamples.py
#
# Illustration of using pyparsing's scanString,transformString, and searchString methods
#
# Copyright (c) 2004, 2006 Paul McGuire
#
from pyparsing import Word, alphas, alphanums, Literal, restOfLine, OneOrMore, \
empty, Suppress, replaceWith
# simulate some C++ code
testData = """
#define MAX_LOCS=100
#define USERNAME = "floyd"
#define PASSWORD = "swordfish"
a = MAX_LOCS;
CORBA::initORB("xyzzy", USERNAME, PASSWORD );
"""
#################
print("Example of an extractor")
print("----------------------")
# simple grammar to match #define's
ident = Word(alphas, alphanums+"_")
macroDef = Literal("#define") + ident.setResultsName("name") + "=" + restOfLine.setResultsName("value")
for t,s,e in macroDef.scanString( testData ):
print(t.name,":", t.value)
# or a quick way to make a dictionary of the names and values
# (return only key and value tokens, and construct dict from key-value pairs)
# - empty ahead of restOfLine advances past leading whitespace, does implicit lstrip during parsing
macroDef = Suppress("#define") + ident + Suppress("=") + empty + restOfLine
macros = dict(list(macroDef.searchString(testData)))
print("macros =", macros)
print()
#################
print("Examples of a transformer")
print("----------------------")
# convert C++ namespaces to mangled C-compatible names
scopedIdent = ident + OneOrMore( Literal("::").suppress() + ident )
scopedIdent.setParseAction(lambda t: "_".join(t))
print("(replace namespace-scoped names with C-compatible names)")
print(scopedIdent.transformString( testData ))
# or a crude pre-processor (use parse actions to replace matching text)
def substituteMacro(s,l,t):
if t[0] in macros:
return macros[t[0]]
ident.setParseAction( substituteMacro )
ident.ignore(macroDef)
print("(simulate #define pre-processor)")
print(ident.transformString( testData ))
#################
print("Example of a stripper")
print("----------------------")
from pyparsing import dblQuotedString, LineStart
# remove all string macro definitions (after extracting to a string resource table?)
stringMacroDef = Literal("#define") + ident + "=" + dblQuotedString + LineStart()
stringMacroDef.setParseAction( replaceWith("") )
print(stringMacroDef.transformString( testData ))
| apache-2.0 |
gmt/portage | pym/portage/tests/ebuild/test_array_fromfile_eof.py | 10 | 1114 | # Copyright 2009-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import array
import tempfile
from portage import _unicode_decode
from portage import _unicode_encode
from portage.tests import TestCase
class ArrayFromfileEofTestCase(TestCase):
def testArrayFromfileEof(self):
# This tests if the following python issue is fixed
# in the currently running version of python:
# http://bugs.python.org/issue5334
input_data = "an arbitrary string"
input_bytes = _unicode_encode(input_data,
encoding='utf_8', errors='strict')
f = tempfile.TemporaryFile()
f.write(input_bytes)
f.seek(0)
data = []
eof = False
while not eof:
a = array.array('B')
try:
a.fromfile(f, len(input_bytes) + 1)
except (EOFError, IOError):
# python-3.0 lost data here
eof = True
if not a:
eof = True
else:
try:
# Python >=3.2
data.append(a.tobytes())
except AttributeError:
data.append(a.tostring())
f.close()
self.assertEqual(input_data, _unicode_decode(b''.join(data),
encoding='utf_8', errors='strict'))
| gpl-2.0 |
MihaiMoldovanu/ansible | lib/ansible/module_utils/facts/network/hurd.py | 192 | 3142 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.network.base import Network, NetworkCollector
class HurdPfinetNetwork(Network):
"""
This is a GNU Hurd specific subclass of Network. It use fsysopts to
get the ip address and support only pfinet.
"""
platform = 'GNU'
_socket_dir = '/servers/socket/'
def populate(self, collected_facts=None):
network_facts = {}
fsysopts_path = self.module.get_bin_path('fsysopts')
if fsysopts_path is None:
return network_facts
socket_path = None
for l in ('inet', 'inet6'):
link = os.path.join(self._socket_dir, l)
if os.path.exists(link):
socket_path = link
break
# FIXME: extract to method
# FIXME: exit early on falsey socket_path and un-indent whole block
if socket_path:
rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
# FIXME: build up a interfaces datastructure, then assign into network_facts
network_facts['interfaces'] = []
for i in out.split():
if '=' in i and i.startswith('--'):
k, v = i.split('=', 1)
# remove '--'
k = k[2:]
if k == 'interface':
# remove /dev/ from /dev/eth0
v = v[5:]
network_facts['interfaces'].append(v)
network_facts[v] = {
'active': True,
'device': v,
'ipv4': {},
'ipv6': [],
}
current_if = v
elif k == 'address':
network_facts[current_if]['ipv4']['address'] = v
elif k == 'netmask':
network_facts[current_if]['ipv4']['netmask'] = v
elif k == 'address6':
address, prefix = v.split('/')
network_facts[current_if]['ipv6'].append({
'address': address,
'prefix': prefix,
})
return network_facts
class HurdNetworkCollector(NetworkCollector):
_platform = 'GNU'
_fact_class = HurdPfinetNetwork
| gpl-3.0 |
maxwward/SCOPEBak | askbot/migrations/0092_postize_vote_and_activity.py | 2 | 34359 | # encoding: utf-8
import datetime
import sys
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot.migrations import TERM_RED_BOLD, TERM_GREEN, TERM_RESET
from askbot.utils.console import ProgressBar
class Migration(DataMigration):
def forwards(self, orm):
message = "Connecting votes to posts"
num_votes = orm.Vote.objects.count()
for v in ProgressBar(orm.Vote.objects.iterator(), num_votes, message):
try:
if (v.content_type.app_label, v.content_type.model) == ('askbot', 'exercise'):
v.voted_post = orm.Post.objects.get(self_exercise__id=v.object_id)
elif (v.content_type.app_label, v.content_type.model) == ('askbot', 'problem'):
v.voted_post = orm.Post.objects.get(self_problem__id=v.object_id)
elif (v.content_type.app_label, v.content_type.model) == ('askbot', 'comment'):
v.voted_post = orm.Post.objects.get(self_comment__id=v.object_id)
else:
raise ValueError('Unknown vote subject!')
v.save()
except orm.Post.DoesNotExist:
print TERM_RED_BOLD, 'Post of type=%s, id=%s does not exist!!!' % (v.content_type.model, v.object_id)
v.delete()
###
# ContentType for Post model might not yet be present in the database
# (if migrations are applied in a row then contenttypes update is not called between them)
ct_post, c = orm['contenttypes.ContentType'].objects.get_or_create(app_label='askbot', model='post', defaults={'name': 'post'})
abandoned_activities = []
message = "Connecting activity objects to posts"
num_activities = orm.Activity.objects.count()
for a in ProgressBar(orm.Activity.objects.iterator(), num_activities, message):
# test if content_object for this activity exists - there might be a bunch of "abandoned" activities
#
# NOTE that if activity.content_object is gone then we cannot reliably recover it from activity.exercise
# - the latter is just a thread to which the activity is related! It might occasionally be the actual post
# the activity is related to, but it's not the general rule.
model_signature = '.'.join([a.content_type.app_label, a.content_type.model])
if not orm[model_signature].objects.filter(id=a.object_id).exists():
abandoned_activities.append(a)
continue
save = False
ct = a.content_type
if ct.app_label == 'askbot' and ct.model in ('exercise', 'problem', 'comment'):
a.content_type = ct_post
a.object_id = orm.Post.objects.get(**{'self_%s__id' % str(ct.model): a.object_id}).id
save = True
if a.exercise:
a.exercise_post = orm.Post.objects.get(self_exercise__id=a.exercise.id)
save = True
if save:
a.save()
if abandoned_activities:
# Remove "abandoned" activities
abandoned_activities_lst = [
(a.id, '.'.join([a.content_type.app_label, a.content_type.model]), a.object_id)
for a in abandoned_activities
]
print TERM_RED_BOLD, "!!! Abandoned activities num=%d, total num=%d:" % (len(abandoned_activities), orm.Activity.objects.count()), TERM_RESET
print TERM_GREEN, abandoned_activities_lst, TERM_RESET
for a in abandoned_activities:
a.delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True'}),
'exercise_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoriteexercise': {
'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_posts'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'self_problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
'self_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Comment']"}),
'self_exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
# "Post-processing" - added manually to add support for URL mapping
'old_exercise_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_problem_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': True, 'blank': True, 'default': None, 'unique': 'True'}),
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('problem', 'revision'), ('exercise', 'revision'))", 'object_name': 'PostRevision'},
'problem': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Problem']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Exercise']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.exercise': {
'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'unique': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.exerciseview': {
'Meta': {'object_name': 'ExerciseView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Exercise']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_problem': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Problem']", 'null': 'True', 'blank': 'True'}),
'problem_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteExercise']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_votes'", 'null': 'True', 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'exercises_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
pedroma-gomezp/Gymkhana_Computer_Networks_II | gymkhana/steps/step5.py | 2 | 2162 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import socket
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
import threading
from gymkhana.aux.printing_format import green_nd_bold, yellow_nd_bold, colorfill, end_format
from gymkhana.aux.my_variables import uclm_url, uclm_port3, my_TCPserver_port
from gymkhana.steps.step import Step
class Step5(Step):
def __init__(self):
super().__init__()
def run(self, proxy_code):
print("{}{}{}".format(green_nd_bold,
"#### STEP 5: HTTP WEB PROXY\n",
end_format))
socketserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #internet, tcp
socketserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #reusable port
socketserver.bind(('', my_TCPserver_port))
socketserver.listen(30)
t = threading.Thread(target = myTCPserver, args = (socketserver,))
t.setDaemon(True)
t.start()
sockPROXY = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #internet, tcp
sockPROXY.connect( (uclm_url, uclm_port3) )
message = "{} {}".format(proxy_code,
my_TCPserver_port)
sockPROXY.send(message.encode())
msg, client = sockPROXY.recvfrom(1024)
print("{}{}{}{}".format(yellow_nd_bold,
colorfill,
msg.decode(),
end_format))
socketserver.close()
sockPROXY.close()
def myTCPserver(socketserver):
while True:
clientsocket, client = socketserver.accept()
t=threading.Thread(target=download_and_send_webpage, args=(clientsocket, ))
t.start()
def download_and_send_webpage(clientsock):
data = clientsock.recv(1024)
print(data.decode())
url = data.split()[1].decode()
print("Downloading file {}".format(url))
url_request = Request(url)
my_file = urlopen(url_request)
downloaded_file = my_file.read()
print("Sending file {}".format(url))
clientsock.send(downloaded_file)
clientsock.close()
| gpl-3.0 |
whitgroves/taskmap | .venvs/python3.6.0/lib/python3.6/site-packages/setuptools/command/build_ext.py | 193 | 13049 | import os
import sys
import itertools
import imp
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
# temporarily substitute the pyconfig.h variables so that distutils'
# 'customize_compiler' uses them before we build the shared libraries.
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
if_dl = lambda s: s if have_rtld else ''
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
if '.abi3' in suffix: # Unix
return suffix
elif suffix == '.pyd': # Windows
return suffix
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
use_abi3 = (
six.PY3
and getattr(ext, 'py_limited_api')
and get_abi3_suffix()
)
if use_abi3:
so_ext = _get_config_var_837('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
_customize_compiler_for_shlib(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp" + if_dl(", dl"),
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
def _get_config_var_837(name):
"""
In https://github.com/pypa/setuptools/pull/837, we discovered
Python 3.3.0 exposes the extension suffix under the name 'SO'.
"""
if sys.version_info < (3, 3, 1):
name = 'SO'
return get_config_var(name)
| unlicense |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.