repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 classes)
---|---|---|---|---|---|
thnee/ansible | lib/ansible/modules/network/system/_net_system.py | 10 | 3369 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_system
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage the system attributes on network devices
description:
- This module provides declarative management of node system attributes
on network devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
deprecated:
removed_in: "2.13"
alternative: Use platform-specific "[netos]_system" module
why: Updated modules released with more functionality
extends_documentation_fragment: network_agnostic
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a name or list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- Provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts a list of DNS servers. See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain name
net_system:
hostname: ios01
domain_name: test.example.com
domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: domain search on single domain
net_system:
domain_search: ansible.com
- name: remove configuration
net_system:
state: absent
- name: configure DNS lookup sources
net_system:
lookup_source: MgmtEth0/0/CPU0/0
- name: configure name servers
net_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- hostname ios01
- ip domain name test.example.com
"""
| gpl-3.0 |
cooperra/antlr4 | runtime/Python3/src/antlr4/Lexer.py | 17 | 12466 | # [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
# A lexer is a recognizer that draws input symbols from a character stream.
# Lexer grammars result in a subclass of this object. A Lexer object
# uses simplified match() and error recovery mechanisms in the interest
# of speed.
#/
from io import StringIO
from antlr4.CommonTokenFactory import CommonTokenFactory
from antlr4.atn.LexerATNSimulator import LexerATNSimulator
from antlr4.InputStream import InputStream
from antlr4.Recognizer import Recognizer
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException, LexerNoViableAltException, RecognitionException
class TokenSource(object):
pass
class Lexer(Recognizer, TokenSource):
DEFAULT_MODE = 0
MORE = -2
SKIP = -3
DEFAULT_TOKEN_CHANNEL = Token.DEFAULT_CHANNEL
HIDDEN = Token.HIDDEN_CHANNEL
MIN_CHAR_VALUE = '\u0000'
MAX_CHAR_VALUE = '\uFFFE'
def __init__(self, input:InputStream):
super().__init__()
self._input = input
self._factory = CommonTokenFactory.DEFAULT
self._tokenFactorySourcePair = (self, input)
self._interp = None # child classes must populate this
# The goal of all lexer rules/methods is to create a token object.
# this is an instance variable as multiple rules may collaborate to
# create a single token. nextToken will return this object after
# matching lexer rule(s). If you subclass to allow multiple token
# emissions, then set this to the last token to be matched or
# something nonnull so that the auto token emit mechanism will not
# emit another token.
self._token = None
# What character index in the stream did the current token start at?
# Needed, for example, to get the text for current token. Set at
# the start of nextToken.
self._tokenStartCharIndex = -1
# The line on which the first character of the token resides#/
self._tokenStartLine = -1
# The character position of first character within the line#/
self._tokenStartColumn = -1
# Once we see EOF on char stream, next token will be EOF.
# If you have DONE : EOF ; then you see DONE EOF.
self._hitEOF = False
# The channel number for the current token#/
self._channel = Token.DEFAULT_CHANNEL
# The token type for the current token#/
self._type = Token.INVALID_TYPE
self._modeStack = []
self._mode = self.DEFAULT_MODE
# You can set the text for the current token to override what is in
# the input char buffer. Use setText() or set this instance variable.
#/
self._text = None
def reset(self):
# wack Lexer state variables
if self._input is not None:
self._input.seek(0) # rewind the input
self._token = None
self._type = Token.INVALID_TYPE
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = -1
self._tokenStartColumn = -1
self._tokenStartLine = -1
self._text = None
self._hitEOF = False
self._mode = Lexer.DEFAULT_MODE
self._modeStack = []
self._interp.reset()
# Return a token from this source; i.e., match a token on the char
# stream.
def nextToken(self):
if self._input is None:
raise IllegalStateException("nextToken requires a non-null input stream.")
# Mark start location in char stream so unbuffered streams are
# guaranteed to at least have the text of the current token
tokenStartMarker = self._input.mark()
try:
while True:
if self._hitEOF:
self.emitEOF()
return self._token
self._token = None
self._channel = Token.DEFAULT_CHANNEL
self._tokenStartCharIndex = self._input.index
self._tokenStartColumn = self._interp.column
self._tokenStartLine = self._interp.line
self._text = None
continueOuter = False
while True:
self._type = Token.INVALID_TYPE
ttype = self.SKIP
try:
ttype = self._interp.match(self._input, self._mode)
except LexerNoViableAltException as e:
self.notifyListeners(e) # report error
self.recover(e)
if self._input.LA(1)==Token.EOF:
self._hitEOF = True
if self._type == Token.INVALID_TYPE:
self._type = ttype
if self._type == self.SKIP:
continueOuter = True
break
if self._type!=self.MORE:
break
if continueOuter:
continue
if self._token is None:
self.emit()
return self._token
finally:
# make sure we release marker after match or
# unbuffered char stream will keep buffering
self._input.release(tokenStartMarker)
# Instruct the lexer to skip creating a token for current lexer rule
# and look for another token. nextToken() knows to keep looking when
# a lexer rule finishes with token set to SKIP_TOKEN. Recall that
# if token==null at end of any token rule, it creates one for you
# and emits it.
#/
def skip(self):
self._type = self.SKIP
def more(self):
self._type = self.MORE
def mode(self, m:int):
self._mode = m
def pushMode(self, m:int):
if self._interp.debug:
print("pushMode " + str(m))
self._modeStack.append(self._mode)
self.mode(m)
def popMode(self):
if len(self._modeStack)==0:
raise Exception("Empty Stack")
if self._interp.debug:
print("popMode back to "+ self._modeStack[:-1])
self.mode( self._modeStack.pop() )
return self._mode
# Set the char stream and reset the lexer#/
@property
def inputStream(self):
return self._input
@inputStream.setter
def inputStream(self, input:InputStream):
self._input = None
self._tokenFactorySourcePair = (self, self._input)
self.reset()
self._input = input
self._tokenFactorySourcePair = (self, self._input)
@property
def sourceName(self):
return self._input.sourceName
# By default does not support multiple emits per nextToken invocation
# for efficiency reasons. Subclass and override this method, nextToken,
# and getToken (to push tokens into a list and pull from that list
# rather than a single variable as this implementation does).
#/
def emitToken(self, token:Token):
self._token = token
# The standard method called to automatically emit a token at the
# outermost lexical rule. The token object should point into the
# char buffer start..stop. If there is a text override in 'text',
# use that to set the token's text. Override this method to emit
# custom Token objects or provide a new factory.
#/
def emit(self):
t = self._factory.create(self._tokenFactorySourcePair, self._type, self._text, self._channel, self._tokenStartCharIndex,
self.getCharIndex()-1, self._tokenStartLine, self._tokenStartColumn)
self.emitToken(t)
return t
def emitEOF(self):
cpos = self.column
lpos = self.line
eof = self._factory.create(self._tokenFactorySourcePair, Token.EOF, None, Token.DEFAULT_CHANNEL, self._input.index,
self._input.index-1, lpos, cpos)
self.emitToken(eof)
return eof
@property
def type(self):
return self._type
@type.setter
def type(self, type:int):
self._type = type
@property
def line(self):
return self._interp.line
@line.setter
def line(self, line:int):
self._interp.line = line
@property
def column(self):
return self._interp.column
@column.setter
def column(self, column:int):
self._interp.column = column
# What is the index of the current character of lookahead?#/
def getCharIndex(self):
return self._input.index
# Return the text matched so far for the current token or any
# text override.
@property
def text(self):
if self._text is not None:
return self._text
else:
return self._interp.getText(self._input)
# Set the complete text of this token; it wipes any previous
# changes to the text.
@text.setter
def text(self, txt:str):
self._text = txt
# Return a list of all Token objects in input char stream.
# Forces load of all tokens. Does not include EOF token.
#/
def getAllTokens(self):
tokens = []
t = self.nextToken()
while t.type!=Token.EOF:
tokens.append(t)
t = self.nextToken()
return tokens
def notifyListeners(self, e:LexerNoViableAltException):
start = self._tokenStartCharIndex
stop = self._input.index
text = self._input.getText(start, stop)
msg = "token recognition error at: '" + self.getErrorDisplay(text) + "'"
listener = self.getErrorListenerDispatch()
listener.syntaxError(self, None, self._tokenStartLine, self._tokenStartColumn, msg, e)
def getErrorDisplay(self, s:str):
with StringIO() as buf:
for c in s:
buf.write(self.getErrorDisplayForChar(c))
return buf.getvalue()
def getErrorDisplayForChar(self, c:str):
if ord(c[0])==Token.EOF:
return "<EOF>"
elif c=='\n':
return "\\n"
elif c=='\t':
return "\\t"
elif c=='\r':
return "\\r"
else:
return c
def getCharErrorDisplay(self, c:str):
return "'" + self.getErrorDisplayForChar(c) + "'"
# Lexers can normally match any char in its vocabulary after matching
# a token, so do the easy thing and just kill a character and hope
# it all works out. You can instead use the rule invocation stack
# to do sophisticated error recovery if you are in a fragment rule.
#/
def recover(self, re:RecognitionException):
if self._input.LA(1) != Token.EOF:
if isinstance(re, LexerNoViableAltException):
# skip a char and try again
self._interp.consume(self._input)
else:
# TODO: Do we lose character or line position information?
self._input.consume()
| bsd-3-clause |
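The Lexer.py runtime above is normally driven through a grammar-generated subclass rather than instantiated directly. A minimal usage sketch follows; `MyGrammarLexer` and the input text are hypothetical (the class is assumed to come from `antlr4 -Dlanguage=Python3 MyGrammar.g4`), while `InputStream` and `getAllTokens()` are the real APIs shown above. `getAllTokens()` simply calls `nextToken()` in a loop until it sees EOF.

```python
# Hypothetical usage sketch: MyGrammarLexer is an assumed generated lexer;
# only InputStream and the Lexer API itself are real here.
from antlr4 import InputStream
from MyGrammarLexer import MyGrammarLexer

stream = InputStream("a = 1 + 2")
lexer = MyGrammarLexer(stream)

# getAllTokens() drives nextToken() until Token.EOF (the EOF token is excluded).
for tok in lexer.getAllTokens():
    print(tok.type, repr(tok.text), tok.line, tok.column)
```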
apark263/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/fill_triangular.py | 12 | 5973 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FillTriangular bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as dist_util
from tensorflow.python.util import deprecation
__all__ = [
"FillTriangular",
]
class FillTriangular(bijector.Bijector):
"""Transforms vectors to triangular.
Triangular matrix elements are filled in a clockwise spiral.
Given input with shape `batch_shape + [d]`, produces output with
shape `batch_shape + [n, n]`, where
`n = (-1 + sqrt(1 + 8 * d))/2`.
This follows by solving the quadratic equation
`d = 1 + 2 + ... + n = n * (n + 1)/2`.
#### Example
```python
b = tfb.FillTriangular(upper=False)
b.forward([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
b = tfb.FillTriangular(upper=True)
b.forward([1, 2, 3, 4, 5, 6])
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
upper=False,
validate_args=False,
name="fill_triangular"):
"""Instantiates the `FillTriangular` bijector.
Args:
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
self._upper = upper
super(FillTriangular, self).__init__(
forward_min_event_ndims=1,
inverse_min_event_ndims=2,
validate_args=validate_args,
name=name)
def _forward(self, x):
return dist_util.fill_triangular(x, upper=self._upper)
def _inverse(self, y):
return dist_util.fill_triangular_inverse(y, upper=self._upper)
def _forward_log_det_jacobian(self, x):
return array_ops.zeros_like(x[..., 0])
def _inverse_log_det_jacobian(self, y):
return array_ops.zeros_like(y[..., 0, 0])
def _forward_event_shape(self, input_shape):
batch_shape, d = (input_shape[:-1],
tensor_shape.dimension_value(input_shape[-1]))
if d is None:
n = None
else:
n = vector_size_to_square_matrix_size(d, self.validate_args)
return batch_shape.concatenate([n, n])
def _inverse_event_shape(self, output_shape):
batch_shape, n1, n2 = (output_shape[:-2],
tensor_shape.dimension_value(output_shape[-2]),
tensor_shape.dimension_value(output_shape[-1]))
if n1 is None or n2 is None:
m = None
elif n1 != n2:
raise ValueError("Matrix must be square. (saw [{}, {}])".format(n1, n2))
else:
m = n1 * (n1 + 1) / 2
return batch_shape.concatenate([m])
def _forward_event_shape_tensor(self, input_shape_tensor):
batch_shape, d = input_shape_tensor[:-1], input_shape_tensor[-1]
n = vector_size_to_square_matrix_size(d, self.validate_args)
return array_ops.concat([batch_shape, [n, n]], axis=0)
def _inverse_event_shape_tensor(self, output_shape_tensor):
batch_shape, n = output_shape_tensor[:-2], output_shape_tensor[-1]
if self.validate_args:
is_square_matrix = check_ops.assert_equal(
n, output_shape_tensor[-2], message="Matrix must be square.")
with ops.control_dependencies([is_square_matrix]):
n = array_ops.identity(n)
d = math_ops.cast(n * (n + 1) / 2, output_shape_tensor.dtype)
return array_ops.concat([batch_shape, [d]], axis=0)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def vector_size_to_square_matrix_size(d, validate_args, name=None):
"""Convert a vector size to a matrix size."""
if isinstance(d, (float, int, np.generic, np.ndarray)):
n = (-1 + np.sqrt(1 + 8 * d)) / 2.
if float(int(n)) != n:
raise ValueError("Vector length is not a triangular number.")
return int(n)
else:
with ops.name_scope(name, "vector_size_to_square_matrix_size", [d]) as name:
n = (-1. + math_ops.sqrt(1 + 8. * math_ops.to_float(d))) / 2.
if validate_args:
with ops.control_dependencies([check_ops.assert_equal(
math_ops.to_float(math_ops.to_int32(n)), n,
message="Vector length is not a triangular number")]):
n = array_ops.identity(n)
return math_ops.cast(n, d.dtype)
| apache-2.0 |
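The size relation used by `vector_size_to_square_matrix_size` above follows from solving `d = n * (n + 1) / 2` for `n`. Below is a small standalone NumPy restatement of that inversion (a sketch for illustration, not the TensorFlow implementation; the function name is invented):

```python
import numpy as np

def vector_size_to_matrix_size(d):
    # Invert d = n * (n + 1) / 2; reject lengths that are not triangular numbers.
    n = (-1 + np.sqrt(1 + 8 * d)) / 2.0
    if float(int(n)) != n:
        raise ValueError("Vector length is not a triangular number.")
    return int(n)

assert vector_size_to_matrix_size(6) == 3    # a length-6 vector fills a 3x3 triangle
assert vector_size_to_matrix_size(10) == 4   # 10 = 4 * 5 / 2
```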
vadimtk/chrome4sdp | tools/usb_gadget/package.py | 62 | 2539 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to package and upload the USB gadget framework.
"""
import argparse
import hashlib
import os
import StringIO
import urllib2
import zipfile
def MakeZip(directory=None, files=None):
"""Construct a zip file.
Args:
directory: Include Python source files from this directory
files: Include these files
Returns:
A tuple of the buffer containing the zip file and its MD5 hash.
"""
buf = StringIO.StringIO()
archive = zipfile.PyZipFile(buf, 'w')
if directory is not None:
archive.writepy(directory)
if files is not None:
for f in files:
archive.write(f, os.path.basename(f))
archive.close()
content = buf.getvalue()
buf.close()
md5 = hashlib.md5(content).hexdigest()
return content, md5
def EncodeBody(filename, buf):
return '\r\n'.join([
'--foo',
'Content-Disposition: form-data; name="file"; filename="{}"'
.format(filename),
'Content-Type: application/octet-stream',
'',
buf,
'--foo--',
''
])
def UploadZip(content, md5, host):
filename = 'usb_gadget-{}.zip'.format(md5)
req = urllib2.Request(url='http://{}/update'.format(host),
data=EncodeBody(filename, content))
req.add_header('Content-Type', 'multipart/form-data; boundary=foo')
urllib2.urlopen(req)
def main():
parser = argparse.ArgumentParser(
description='Package (and upload) the USB gadget framework.')
parser.add_argument(
'--dir', type=str, metavar='DIR',
help='package all Python files from DIR')
parser.add_argument(
'--zip-file', type=str, metavar='FILE',
help='save package as FILE')
parser.add_argument(
'--hash-file', type=str, metavar='FILE',
help='save package hash as FILE')
parser.add_argument(
'--upload', type=str, metavar='HOST[:PORT]',
help='upload package to target system')
parser.add_argument(
'files', metavar='FILE', type=str, nargs='*',
help='source files')
args = parser.parse_args()
content, md5 = MakeZip(directory=args.dir, files=args.files)
if args.zip_file:
with open(args.zip_file, 'wb') as zip_file:
zip_file.write(content)
if args.hash_file:
with open(args.hash_file, 'wb') as hash_file:
hash_file.write(md5)
if args.upload:
UploadZip(content, md5, args.upload)
if __name__ == '__main__':
main()
| bsd-3-clause |
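Besides the command-line entry point, `MakeZip` can be called directly. A hedged sketch mirroring what `main()` does with `--zip-file` and `--hash-file` (the directory and file names are made up, and the module is assumed importable as `package` from its own directory):

```python
# Hypothetical usage sketch of package.MakeZip; paths are invented.
import package

content, md5 = package.MakeZip(directory='usb_gadget', files=['default_gadget.py'])
with open('usb_gadget-%s.zip' % md5, 'wb') as zip_file:
    zip_file.write(content)
with open('usb_gadget.md5', 'wb') as hash_file:
    hash_file.write(md5)
```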
neuronalysis/engulfing-core | vendor/bootstrap-datepicker/docs/conf.py | 276 | 8002 | # -*- coding: utf-8 -*-
#
# bootstrap-datepicker documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 2 14:45:57 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = ''
# The full version, including alpha/beta/rc tags.
#release = ''
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
primary_domain = 'js'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bootstrap-datepicker'
copyright = u'2013, eternicode'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['_themes',]
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bootstrap-datepickerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bootstrap-datepicker.tex', u'bootstrap-datepicker Documentation',
u'eternicode', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
[u'eternicode'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bootstrap-datepicker', u'bootstrap-datepicker Documentation',
u'eternicode', 'bootstrap-datepicker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
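The conf.py above is consumed by `sphinx-build`. A short hedged sketch of driving the same build programmatically against this configuration (the directory layout is assumed; `sphinx.application.Sphinx` is the standard entry point):

```python
# Hypothetical build driver; `docs/` is assumed to contain the conf.py above.
from sphinx.application import Sphinx

app = Sphinx(srcdir='docs', confdir='docs', outdir='docs/_build/html',
             doctreedir='docs/_build/doctrees', buildername='html')
app.build()
```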
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/IPython/core/historyapp.py | 9 | 6048 | # encoding: utf-8
"""
An application for managing IPython history.
To be invoked as the `ipython history` subcommand.
"""
from __future__ import print_function
import os
import sqlite3
from traitlets.config.application import Application
from IPython.core.application import BaseIPythonApplication
from traitlets import Bool, Int, Dict
from IPython.utils.io import ask_yes_no
trim_hist_help = """Trim the IPython history database to the last 1000 entries.
This actually copies the last 1000 entries to a new database, and then replaces
the old file with the new. Use the `--keep=` argument to specify a number
other than 1000.
"""
clear_hist_help = """Clear the IPython history database, deleting all entries.
Because this is a destructive operation, IPython will prompt the user if they
really want to do this. Passing a `-f` flag will force clearing without a
prompt.
This is a handy alias to `ipython history trim --keep=0`
"""
class HistoryTrim(BaseIPythonApplication):
description = trim_hist_help
backup = Bool(False, config=True,
help="Keep the old history file as history.sqlite.<N>")
keep = Int(1000, config=True,
help="Number of recent lines to keep in the database.")
flags = Dict(dict(
backup = ({'HistoryTrim' : {'backup' : True}},
backup.get_metadata('help')
)
))
aliases=Dict(dict(
keep = 'HistoryTrim.keep'
))
def start(self):
profile_dir = self.profile_dir.location
hist_file = os.path.join(profile_dir, 'history.sqlite')
con = sqlite3.connect(hist_file)
# Grab the recent history from the current database.
inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
if len(inputs) <= self.keep:
print("There are already at most %d entries in the history database." % self.keep)
print("Not doing anything. Use --keep= argument to keep fewer entries")
return
print("Trimming history to the most recent %d entries." % self.keep)
inputs.pop() # Remove the extra element we got to check the length.
inputs.reverse()
if inputs:
first_session = inputs[0][0]
outputs = list(con.execute('SELECT session, line, output FROM '
'output_history WHERE session >= ?', (first_session,)))
sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
'sessions WHERE session >= ?', (first_session,)))
con.close()
# Create the new history database.
new_hist_file = os.path.join(profile_dir, 'history.sqlite.new')
i = 0
while os.path.exists(new_hist_file):
# Make sure we don't interfere with an existing file.
i += 1
new_hist_file = os.path.join(profile_dir, 'history.sqlite.new'+str(i))
new_db = sqlite3.connect(new_hist_file)
new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
new_db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
new_db.commit()
if inputs:
with new_db:
# Add the recent history into the new database.
new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
new_db.executemany('insert into history values (?,?,?,?)', inputs)
new_db.executemany('insert into output_history values (?,?,?)', outputs)
new_db.close()
if self.backup:
i = 1
backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
while os.path.exists(backup_hist_file):
i += 1
backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
os.rename(hist_file, backup_hist_file)
print("Backed up longer history file to", backup_hist_file)
else:
os.remove(hist_file)
os.rename(new_hist_file, hist_file)
class HistoryClear(HistoryTrim):
description = clear_hist_help
keep = Int(0, config=False,
help="Number of recent lines to keep in the database.")
force = Bool(False, config=True,
help="Don't prompt user for confirmation")
flags = Dict(dict(
force = ({'HistoryClear' : {'force' : True}},
force.get_metadata('help')),
f = ({'HistoryTrim' : {'force' : True}},
force.get_metadata('help')
)
))
aliases = Dict()
def start(self):
if self.force or ask_yes_no("Really delete all ipython history? ",
default="no", interrupt="no"):
HistoryTrim.start(self)
class HistoryApp(Application):
name = u'ipython-history'
description = "Manage the IPython history database."
subcommands = Dict(dict(
trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
))
def start(self):
if self.subapp is None:
print("No subcommand specified. Must specify one of: %s" % \
(self.subcommands.keys()))
print()
self.print_description()
self.print_subcommands()
self.exit(1)
else:
return self.subapp.start()
| artistic-2.0 |
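These Application subclasses are normally reached through the `ipython history` subcommand described in the module docstring. A hedged sketch of driving `HistoryApp` programmatically, roughly equivalent to running `ipython history trim --keep=500 --backup` (assumes a standard IPython install):

```python
# Hypothetical programmatic invocation; the usual entry point is the
# `ipython history trim` command line described above.
from IPython.core.historyapp import HistoryApp

app = HistoryApp()
app.initialize(['trim', '--keep=500', '--backup'])  # parse subcommand and flags
app.start()  # dispatches to HistoryTrim.start()
```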
cnsoft/kbengine-cocos2dx | kbe/res/scripts/common/Lib/lib2to3/fixer_base.py | 61 | 6839 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Base class for fixers (optional, but recommended)."""
# Python imports
import logging
import itertools
# Local imports
from .patcomp import PatternCompiler
from . import pygram
from .fixer_util import does_tree_import
class BaseFix(object):
"""Optional base class for fixers.
The subclass name must be FixFooBar where FooBar is the result of
removing underscores and capitalizing the words of the fix name.
For example, the class name for a fixer named 'has_key' should be
FixHasKey.
"""
PATTERN = None # Most subclasses should override with a string literal
pattern = None # Compiled pattern, set by compile_pattern()
pattern_tree = None # Tree representation of the pattern
options = None # Options object passed to initializer
filename = None # The filename (set by set_filename)
logger = None # A logger (set by set_filename)
numbers = itertools.count(1) # For new_name()
used_names = set() # A set of all used NAMEs
order = "post" # Does the fixer prefer pre- or post-order traversal
explicit = False # Is this ignored by refactor.py -f all?
run_order = 5 # Fixers will be sorted by run order before execution
# Lower numbers will be run first.
_accept_type = None # [Advanced and not public] This tells RefactoringTool
# which node type to accept when there's not a pattern.
keep_line_order = False # For the bottom matcher: match with the
# original line order
BM_compatible = False # Compatibility with the bottom matching
# module; every fixer should set this
# manually
# Shortcut for access to Python grammar symbols
syms = pygram.python_symbols
def __init__(self, options, log):
"""Initializer. Subclass may override.
Args:
options: a dict containing the options passed to RefactoringTool
that could be used to customize the fixer through the command line.
log: a list to append warnings and other messages to.
"""
self.options = options
self.log = log
self.compile_pattern()
def compile_pattern(self):
"""Compiles self.PATTERN into self.pattern.
Subclass may override if it doesn't want to use
self.{pattern,PATTERN} in .match().
"""
if self.PATTERN is not None:
PC = PatternCompiler()
self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN,
with_tree=True)
def set_filename(self, filename):
"""Set the filename, and a logger derived from it.
The main refactoring tool should call this.
"""
self.filename = filename
self.logger = logging.getLogger(filename)
def match(self, node):
"""Returns match for a given parse tree node.
Should return a true or false object (not necessarily a bool).
It may return a non-empty dict of matching sub-nodes as
returned by a matching pattern.
Subclass may override.
"""
results = {"node": node}
return self.pattern.match(node, results) and results
def transform(self, node, results):
"""Returns the transformation for a given parse tree node.
Args:
node: the root of the parse tree that matched the fixer.
results: a dict mapping symbolic names to part of the match.
Returns:
None, or a node that is a modified copy of the
argument node. The node argument may also be modified in-place to
effect the same change.
Subclass *must* override.
"""
raise NotImplementedError()
def new_name(self, template="xxx_todo_changeme"):
"""Return a string suitable for use as an identifier
The new name is guaranteed not to conflict with other identifiers.
"""
name = template
while name in self.used_names:
name = template + str(next(self.numbers))
self.used_names.add(name)
return name
def log_message(self, message):
if self.first_log:
self.first_log = False
self.log.append("### In file %s ###" % self.filename)
self.log.append(message)
def cannot_convert(self, node, reason=None):
"""Warn the user that a given chunk of code is not valid Python 3,
but that it cannot be converted automatically.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
for_output = node.clone()
for_output.prefix = ""
msg = "Line %d: could not convert: %s"
self.log_message(msg % (lineno, for_output))
if reason:
self.log_message(reason)
def warning(self, node, reason):
"""Used for warning the user about possible uncertainty in the
translation.
First argument is the top-level node for the code in question.
Optional second argument is why it can't be converted.
"""
lineno = node.get_lineno()
self.log_message("Line %d: %s" % (lineno, reason))
def start_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
self.used_names = tree.used_names
self.set_filename(filename)
self.numbers = itertools.count(1)
self.first_log = True
def finish_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the conclusion of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
pass
class ConditionalFix(BaseFix):
""" Base class for fixers which not execute if an import is found. """
# This is the name of the import which, if found, will cause the test to be skipped
skip_on = None
def start_tree(self, *args):
super(ConditionalFix, self).start_tree(*args)
self._should_skip = None
def should_skip(self, node):
if self._should_skip is not None:
return self._should_skip
pkg = self.skip_on.split(".")
name = pkg[-1]
pkg = ".".join(pkg[:-1])
self._should_skip = does_tree_import(pkg, name, node)
return self._should_skip
| lgpl-3.0 |
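The BaseFix docstrings above spell out the contract a concrete fixer must satisfy: a class named `FixFooBar`, a `PATTERN` compiled by `compile_pattern()`, and a `transform()` that edits or returns a node. A minimal hypothetical fixer following that contract; the names `old_helper` and `new_helper` are invented for illustration:

```python
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import Name

class FixOldHelper(BaseFix):
    """Rewrites bare calls to old_helper() into new_helper() (illustrative only)."""
    BM_compatible = True
    PATTERN = "power< name='old_helper' trailer< '(' ')' > >"

    def transform(self, node, results):
        # 'name' is the leaf bound by the pattern above; replace it in place,
        # preserving the original whitespace prefix.
        name = results["name"]
        name.replace(Name("new_helper", prefix=name.prefix))
```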
PythonScientists/Shape | env/lib/python3.5/site-packages/jinja2/compiler.py | 117 | 62929 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from functools import update_wrapper
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.optimizer import Optimizer
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap, izip
from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
code_features = ['division']
# does this python version support generator stops? (PEP 0479)
try:
exec('from __future__ import generator_stop')
code_features.append('generator_stop')
except SyntaxError:
pass
# does this python version support yield from?
try:
exec('def f(): yield from x()')
except SyntaxError:
supports_yield_from = False
else:
supports_yield_from = True
def optimizeconst(f):
def new_func(self, node, frame, **kwargs):
# Only optimize if the frame is not volatile
if self.optimized and not frame.eval_ctx.volatile:
new_node = self.optimizer.visit(node, frame.eval_ctx)
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
return update_wrapper(new_func, f)
def generate(node, environment, name, filename, stream=None,
defer_init=False, optimized=True):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = environment.code_generator_class(environment, name, filename,
stream, defer_init,
optimized)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
return True
if type(value) in (tuple, list, set, frozenset):
for item in value:
if not has_safe_repr(item):
return False
return True
elif type(value) is dict:
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class MacroRef(object):
def __init__(self, node):
self.node = node
self.accesses_caller = False
self.accesses_kwargs = False
self.accesses_varargs = False
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.symbols = Symbols(parent and parent.symbols or None)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# the parent of this frame
self.parent = parent
if parent is not None:
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.symbols = self.symbols.copy()
return rv
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created from, but it's not a rootlevel frame any longer.
This is only used to implement if-statements.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False, optimized=True):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
self.optimized = optimized
if optimized:
self.optimizer = Optimizer(environment)
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# Tracks toplevel assignments
self._assign_stack = []
# Tracks parameter definition blocks
self._param_def_block = []
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
return
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
has no buffer, a dummy ``if 0: yield None`` is written automatically.
"""
try:
self.writeline('pass')
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
error could occur. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def enter_frame(self, frame):
undefs = []
for target, (action, param) in iteritems(frame.symbols.loads):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
self.writeline('%s = resolve(%r)' %
(target, param))
elif action == VAR_LOAD_ALIAS:
self.writeline('%s = %s' % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
raise NotImplementedError('unknown load instruction')
if undefs:
self.writeline('%s = missing' % ' = '.join(undefs))
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
undefs = []
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
self.writeline('%s = missing' % ' = '.join(undefs))
def func(self, name):
if self.environment.is_async:
return 'async def %s' % name
return 'def %s' % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
frame = frame.inner()
frame.symbols.analyze_node(node)
macro_ref = MacroRef(node)
explicit_caller = None
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
if arg.name == 'caller':
explicit_caller = idx
if arg.name in ('kwargs', 'varargs'):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
# In older Jinja2 versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
# checking this now and error out if it is anywhere else in
# the argument list.
if explicit_caller is not None:
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
self.fail('When defining macros or call blocks the '
'special "caller" argument must be omitted '
'or be given a default.', node.lineno)
else:
args.append(frame.symbols.declare_parameter('caller'))
macro_ref.accesses_caller = True
if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
args.append(frame.symbols.declare_parameter('kwargs'))
macro_ref.accesses_kwargs = True
if 'varargs' in undeclared and not 'varargs' in skip_special_params:
args.append(frame.symbols.declare_parameter('varargs'))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
self.indent()
self.buffer(frame)
self.enter_frame(frame)
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
self.writeline('if %s is missing:' % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
self.writeline('%s = undefined(%r, name=%r)' % (
ref,
'parameter %r was not provided' % arg.name,
arg.name))
else:
self.writeline('%s = ' % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
self.pop_parameter_definitions()
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame, force_unescaped=True)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
return frame, macro_ref
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
name = getattr(macro_ref.node, 'name', None)
if len(macro_ref.node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
'context.eval_ctx.autoescape)' %
(name, arg_tuple, macro_ref.accesses_kwargs,
macro_ref.accesses_varargs, macro_ref.accesses_caller))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
def dump_local_context(self, frame):
return '{%s}' % ', '.join(
'%r: %s' % (name, target) for name, target
in iteritems(frame.symbols.dump_stores()))
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
self.writeline('resolve = context.resolve_or_missing')
self.writeline('undefined = environment.undefined')
self.writeline('if 0: yield None')
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
stack that permits tracking of yet to be assigned parameters. In
particular this enables the optimization from `visit_Name` to skip
undefined expressions for parameters in macros as macros can reference
otherwise unbound parameters.
"""
self._param_def_block.append(frame.symbols.dump_param_targets())
def pop_parameter_definitions(self):
"""Pops the current parameter definitions set."""
self._param_def_block.pop()
def mark_parameter_stored(self, target):
"""Marks a parameter in the current parameter definitions as stored.
This will skip the enforced undefined checks.
"""
if self._param_def_block:
self._param_def_block[-1].discard(target)
def parameter_is_undeclared(self, target):
"""Checks if a given target is an undeclared parameter."""
if not self._param_def_block:
return False
return target in self._param_def_block[-1]
def push_assign_tracking(self):
"""Pushes a new layer for assignment tracking."""
self._assign_stack.append(set())
def pop_assign_tracking(self, frame):
"""Pops the topmost level for assignment tracking and updates the
context variables if necessary.
"""
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
public_names = [x for x in vars if x[:1] != '_']
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
self.writeline('context.vars[%r] = %s' % (name, ref))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(vars):
if idx:
self.write(', ')
ref = frame.symbols.ref(name)
self.write('%r: %s' % (name, ref))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import %s' % ', '.join(code_features))
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if self.environment.is_async:
self.writeline('from jinja2.asyncsupport import auto_await, '
'auto_aiter, make_async_loop_context')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('%s(context, missing=missing%s):' %
(self.func('root'), envenv), extra=1)
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
if 'self' in find_undeclared(node.body, ('self',)):
ref = frame.symbols.declare_parameter('self')
self.writeline('%s = TemplateReference(context)' % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
self.writeline('parent_template = None')
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.leave_frame(frame, with_python_scope=True)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
if supports_yield_from and not self.environment.is_async:
self.writeline('yield from parent_template.'
'root_render_func(context)')
else:
self.writeline('%sfor event in parent_template.'
'root_render_func(context):' %
(self.environment.is_async and 'async ' or ''))
self.indent()
self.writeline('yield event')
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
self.writeline('%s(context, missing=missing%s):' %
(self.func('block_' + name), envenv),
block, 1)
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
ref = block_frame.symbols.declare_parameter('self')
self.writeline('%s = TemplateReference(context)' % ref)
if 'super' in undeclared:
ref = block_frame.symbols.declare_parameter('super')
self.writeline('%s = context.super(%r, '
'block_%s)' % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 0
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and (
'context.derived(%s)' % self.dump_local_context(frame)) or 'context'
if supports_yield_from and not self.environment.is_async and \
frame.buffer is None:
self.writeline('yield from context.blocks[%r][0](%s)' % (
node.name, context), node)
else:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in context.blocks[%r][0](%s):' % (
loop, node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent()
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
            # time too, but we prefer not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
skip_event_yield = False
if node.with_context:
loop = self.environment.is_async and 'async for' or 'for'
self.writeline('%s event in template.root_render_func('
'template.new_context(context.get_all(), True, '
'%s)):' % (loop, self.dump_local_context(frame)))
elif self.environment.is_async:
self.writeline('for event in (await '
'template._get_default_module_async())'
'._body_stream:')
else:
if supports_yield_from:
self.writeline('yield from template._get_default_module()'
'._body_stream')
skip_event_yield = True
else:
self.writeline('for event in template._get_default_module()'
'._body_stream:')
if not skip_event_yield:
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
self.writeline('%s = ' % frame.symbols.ref(node.target), node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
if self.environment.is_async:
self.write('await ')
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = %senvironment.get_template('
% (self.environment.is_async and 'await ' or ''))
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module%s(context.get_all(), True, %s)'
% (self.environment.is_async and '_async' or '',
self.dump_local_context(frame)))
elif self.environment.is_async:
self.write('_get_default_module_async()')
else:
self.write('_get_default_module()')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('%s = getattr(included_template, '
'%r, missing)' % (frame.symbols.ref(alias), name))
self.writeline('if %s is missing:' % frame.symbols.ref(alias))
self.indent()
self.writeline('%s = undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(frame.symbols.ref(alias),
'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = %s' %
(name, frame.symbols.ref(name)))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
loop_frame = frame.inner()
test_frame = frame.inner()
else_frame = frame.inner()
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
loop_ref = None
if extended_loop:
loop_ref = loop_frame.symbols.declare_parameter('loop')
loop_frame.symbols.analyze_node(node, for_branch='body')
if node.else_:
else_frame.symbols.analyze_node(node, for_branch='else')
if node.test:
loop_filter_func = self.temporary_identifier()
test_frame.symbols.analyze_node(node, for_branch='test')
self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
self.writeline(self.environment.is_async and 'async for ' or 'for ')
self.visit(node.target, loop_frame)
self.write(' in ')
self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
self.write(':')
self.indent()
self.writeline('if ', node.test)
self.visit(node.test, test_frame)
self.write(':')
self.indent()
self.writeline('yield ')
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
self.writeline('%s(reciter, loop_render_func, depth=0):' %
self.func('loop'), node)
self.indent()
self.buffer(loop_frame)
# Use the same buffer for the else frame
else_frame.buffer = loop_frame.buffer
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('%s = missing' % loop_ref)
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
self.write(', %s in await make_async_loop_context(' % loop_ref)
else:
self.write(', %s in LoopContext(' % loop_ref)
else:
self.write(' in ')
if node.test:
self.write('%s(' % loop_filter_func)
if node.recursive:
self.write('reciter')
else:
if self.environment.is_async and not extended_loop:
self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
self.write(')')
if node.test:
self.write(')')
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
self.leave_frame(loop_frame, with_python_scope=node.recursive
and not node.else_)
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
self.leave_frame(else_frame)
self.outdent()
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
self.write('await ')
self.write('loop(')
if self.environment.is_async:
self.write('auto_aiter(')
self.visit(node.iter, frame)
if self.environment.is_async:
self.write(')')
self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
ref = frame.symbols.ref(node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('%s = ' % frame.symbols.ref(node.name))
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
self.writeline('caller = ')
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.symbols.analyze_node(node)
self.enter_frame(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.leave_frame(filter_frame)
def visit_With(self, node, frame):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
self.newline()
self.visit(target, with_frame)
self.write(' = ')
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
allow_constant_finalize = True
if self.environment.finalize:
func = self.environment.finalize
if getattr(func, 'contextfunction', False) or \
getattr(func, 'evalcontextfunction', False):
allow_constant_finalize = False
elif getattr(func, 'environmentfunction', False):
finalize = lambda x: text_type(
self.environment.finalize(self.environment, x))
else:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
if not allow_constant_finalize:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ',')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(escape if context.eval_ctx.autoescape'
' else to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
"contextfunction", False):
self.write('context, ')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(',')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(escape if context.eval_ctx.autoescape else'
' to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
'contextfunction', False):
self.write('context, ')
elif getattr(self.environment.finalize,
'evalcontextfunction', False):
self.write('context.eval_ctx, ')
elif getattr(self.environment.finalize,
'environmentfunction', False):
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
self.write(' = ')
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
def visit_AssignBlock(self, node, frame):
self.push_assign_tracking()
block_frame = frame.inner()
# This is a special case. Since a set block always captures we
# will disable output checks. This way one can use set blocks
# toplevel even in extended templates.
block_frame.require_output_check = False
block_frame.symbols.analyze_node(node)
self.enter_frame(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
self.write(' = (Markup if context.eval_ctx.autoescape '
'else identity)(concat(%s))' % block_frame.buffer)
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
if node.ctx == 'load':
load = frame.symbols.find_load(ref)
if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
not self.parameter_is_undeclared(ref)):
self.write('(undefined(name=%r) if %s is missing else %s)' %
(node.name, ref, ref))
return
self.write(ref)
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
@optimizeconst
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
@optimizeconst
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
self.write('await auto_await(')
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
if self.environment.is_async:
self.write(')')
@optimizeconst
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
self.write('await auto_await(')
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
if self.environment.is_async:
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.symbols.analyze_node(node)
self.enter_frame(scope_frame)
self.blockvisit(node.body, scope_frame)
self.leave_frame(scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| apache-2.0 |
ruslanloman/nova | nova/api/openstack/compute/plugins/v3/security_groups.py | 33 | 23050 | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import security_groups as \
schema_security_groups
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova.network.security_group import openstack_driver
from nova.virt import netutils
LOG = logging.getLogger(__name__)
ALIAS = 'os-security-groups'
ATTRIBUTE_NAME = 'security_groups'
authorize = extensions.os_compute_authorizer(ALIAS)
softauth = extensions.os_compute_soft_authorizer(ALIAS)
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
class SecurityGroupControllerBase(wsgi.Controller):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver(
skip_policy_check=True))
self.compute_api = compute.API(
security_group_api=self.security_group_api, skip_policy_check=True)
def _format_security_group_rule(self, context, rule, group_rule_data=None):
"""Return a secuity group rule in desired API response format.
If group_rule_data is passed in that is used rather than querying
for it.
"""
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
try:
source_group = self.security_group_api.get(
context, id=rule['group_id'])
except exception.SecurityGroupNotFound:
# NOTE(arosen): There is a possible race condition that can
# occur here if two api calls occur concurrently: one that
# lists the security groups and another one that deletes a
# security group rule that has a group_id before the
# group_id is fetched. To handle this if
# SecurityGroupNotFound is raised we return None instead
# of the rule and the caller should ignore the rule.
LOG.debug("Security Group ID %s does not exist",
rule['group_id'])
return
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
elif group_rule_data:
sg_rule['group'] = group_rule_data
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
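    # The rule dict returned by _format_security_group_rule above looks
    # roughly like (illustrative values only):
    #   {'id': 10, 'parent_group_id': 1, 'ip_protocol': 'tcp',
    #    'from_port': 22, 'to_port': 22, 'group': {},
    #    'ip_range': {'cidr': '0.0.0.0/0'}}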
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
formatted_rule = self._format_security_group_rule(context, rule)
if formatted_rule:
security_group['rules'] += [formatted_rule]
return security_group
def _from_body(self, body, key):
if not body:
raise exc.HTTPBadRequest(
explanation=_("The request body can't be empty"))
value = body.get(key, None)
if value is None:
raise exc.HTTPBadRequest(
explanation=_("Missing parameter %s") % key)
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@extensions.expected_errors((400, 404))
def show(self, req, id):
"""Return data about the given security group."""
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
return {'security_group': self._format_security_group(context,
security_group)}
@extensions.expected_errors((400, 404))
@wsgi.response(202)
def delete(self, req, id):
"""Delete a security group."""
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
@extensions.expected_errors(404)
def index(self, req):
"""Returns a list of security groups."""
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@extensions.expected_errors((400, 403))
def create(self, req, body):
"""Creates a new security group."""
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
try:
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
except exception.SecurityGroupLimitExceeded as exp:
raise exc.HTTPForbidden(explanation=exp.format_message())
return {'security_group': self._format_security_group(context,
group_ref)}
@extensions.expected_errors((400, 404))
def update(self, req, id, body):
"""Update a security group."""
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
try:
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@extensions.expected_errors((400, 403, 404))
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
try:
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id'))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except (exception.Invalid, exception.InvalidCidr) as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
group_rule_data = None
try:
if sg_rule.get('group_id'):
source_group = self.security_group_api.get(
context, id=sg_rule['group_id'])
group_rule_data = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.SecurityGroupLimitExceeded as exp:
raise exc.HTTPForbidden(explanation=exp.format_message())
formatted_rule = self._format_security_group_rule(context,
security_group_rule,
group_rule_data)
return {"security_group_rule": formatted_rule}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
def delete(self, req, id):
context = _authorize_context(req)
try:
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.NoUniqueMatch as exp:
raise exc.HTTPConflict(explanation=exp.format_message())
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
class ServerSecurityGroupController(SecurityGroupControllerBase):
@extensions.expected_errors(404)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
try:
instance = common.get_instance(self.compute_api, context,
server_id)
groups = self.security_group_api.get_instance_security_groups(
context, instance.uuid, True)
except (exception.SecurityGroupNotFound,
exception.InstanceNotFound) as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver(
skip_policy_check=True))
self.compute_api = compute.API(
security_group_api=self.security_group_api, skip_policy_check=True)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise exc.HTTPBadRequest(explanation=msg)
return group_name
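    # Example request body accepted by _parse above (illustrative):
    #   {"addSecurityGroup": {"name": "default"}}
    # where the action name selects the inner dict and "name" is the
    # security group to apply.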
def _invoke(self, method, context, id, group_name):
instance = common.get_instance(self.compute_api, context, id)
method(context, instance, group_name)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
try:
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
except (exception.SecurityGroupNotFound,
exception.InstanceNotFound) as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.NoUniqueMatch as exp:
raise exc.HTTPConflict(explanation=exp.format_message())
except (exception.SecurityGroupCannotBeApplied,
exception.SecurityGroupExistsForInstance) as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
@extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
try:
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
except (exception.SecurityGroupNotFound,
exception.InstanceNotFound) as exp:
raise exc.HTTPNotFound(explanation=exp.format_message())
except exception.NoUniqueMatch as exp:
raise exc.HTTPConflict(explanation=exp.format_message())
except exception.SecurityGroupNotExistsForInstance as exp:
raise exc.HTTPBadRequest(explanation=exp.format_message())
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver(
skip_policy_check=True))
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
if not len(servers):
return
key = "security_groups"
context = _authorize_context(req)
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[ATTRIBUTE_NAME] = [{"name": group["name"]}
for group in groups]
else:
# If method is a POST we get the security groups intended for an
            # instance from the request. The reason for this is that, when using
            # neutron security groups, the requested security groups for the
# instance are not in the db and have not been sent to neutron yet.
if req.method != 'POST':
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context,
servers))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[ATTRIBUTE_NAME] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
# try converting to json
req_obj = jsonutils.loads(req.body)
                # Add security groups to the server; if no security group was
                # in the request, add 'default' since that is the group the
                # server belongs to.
servers[0][ATTRIBUTE_NAME] = req_obj['server'].get(
ATTRIBUTE_NAME, [{'name': 'default'}])
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroups(extensions.V3APIExtensionBase):
"""Security group support."""
name = "SecurityGroups"
alias = ALIAS
version = 1
def get_controller_extensions(self):
secgrp_output_ext = extensions.ControllerExtension(
self, 'servers', SecurityGroupsOutputController())
secgrp_act_ext = extensions.ControllerExtension(
self, 'servers', SecurityGroupActionController())
return [secgrp_output_ext, secgrp_act_ext]
def get_resources(self):
secgrp_ext = extensions.ResourceExtension(ALIAS,
SecurityGroupController())
server_secgrp_ext = extensions.ResourceExtension(
ALIAS,
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
secgrp_rules_ext = extensions.ResourceExtension(
'os-security-group-rules',
controller=SecurityGroupRulesController())
return [secgrp_ext, server_secgrp_ext, secgrp_rules_ext]
# NOTE(gmann): This function is not supposed to use 'body_deprecated_param'
# parameter as this is placed to handle scheduler_hint extension for V2.1.
def server_create(self, server_dict, create_kwargs, body_deprecated_param):
security_groups = server_dict.get(ATTRIBUTE_NAME)
if security_groups is not None:
create_kwargs['security_group'] = [
sg['name'] for sg in security_groups if sg.get('name')]
create_kwargs['security_group'] = list(
set(create_kwargs['security_group']))
def get_server_create_schema(self):
return schema_security_groups.server_create
| apache-2.0 |
tttthemanCorp/CardmeleonAppEngine | django/conf/locale/nn/formats.py | 685 | 1657 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
'%Y-%m-%d', # '2006-10-25',
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
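# Rough examples of how values render with the settings above (assuming the
# Norwegian Nynorsk locale data shipped with Django): DATE_FORMAT gives
# "25. oktober 2006", SHORT_DATE_FORMAT gives "25.10.2006", and with the
# separators and grouping above 1234567.89 is displayed as "1 234 567,89"
# when thousand separators are enabled.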
| bsd-3-clause |
jart/tensorflow | tensorflow/contrib/timeseries/__init__.py | 46 | 1353 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A time series library in TensorFlow (TFTS).
@@StructuralEnsembleRegressor
@@ARRegressor
@@ARModel
@@CSVReader
@@NumpyReader
@@RandomWindowInputFn
@@WholeDatasetInputFn
@@predict_continuation_input_fn
@@TrainEvalFeatures
@@FilteringResults
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.timeseries.python.timeseries import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(module_name=__name__,
allowed_exception_list=['saved_model_utils'])
| apache-2.0 |
tgerla/ansible | lib/ansible/plugins/connections/__init__.py | 39 | 5781 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2015 Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import gettext
import select
import os
from abc import ABCMeta, abstractmethod, abstractproperty
from functools import wraps
from six import with_metaclass
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins import shell_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']
def ensure_connect(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
self._connect()
return func(self, *args, **kwargs)
return wrapped
class ConnectionBase(with_metaclass(ABCMeta, object)):
'''
A base class for connections to contain common code.
'''
has_pipelining = False
become_methods = C.BECOME_METHODS
def __init__(self, play_context, new_stdin, *args, **kwargs):
# All these hasattrs allow subclasses to override these parameters
if not hasattr(self, '_play_context'):
self._play_context = play_context
if not hasattr(self, '_new_stdin'):
self._new_stdin = new_stdin
if not hasattr(self, '_display'):
self._display = display
if not hasattr(self, '_connected'):
self._connected = False
self.success_key = None
self.prompt = None
# load the shell plugin for this action/connection
if play_context.shell:
shell_type = play_context.shell
elif hasattr(self, '_shell_type'):
shell_type = getattr(self, '_shell_type')
else:
shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
self._shell = shell_loader.get(shell_type)
if not self._shell:
raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
def _become_method_supported(self):
''' Checks if the current class supports this privilege escalation method '''
if self._play_context.become_method in self.become_methods:
return True
raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method)
def set_host_overrides(self, host):
'''
An optional method, which can be used to set connection plugin parameters
from variables set on the host (or groups to which the host belongs)
Any connection plugin using this should first initialize its attributes in
an overridden `def __init__(self):`, and then use `host.get_vars()` to find
variables which may be used to set those attributes in this method.
'''
pass
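    # A minimal sketch of how a connection plugin might use the hook above
    # (the variable and attribute names here are hypothetical):
    #
    #     def set_host_overrides(self, host):
    #         host_vars = host.get_vars()
    #         if 'ansible_example_timeout' in host_vars:
    #             self._timeout = host_vars['ansible_example_timeout']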
@abstractproperty
def transport(self):
"""String used to identify this Connection class from other classes"""
pass
@abstractmethod
def _connect(self):
"""Connect to the host we've been initialized with"""
# Check if PE is supported
if self._play_context.become:
            self._become_method_supported()
@ensure_connect
@abstractmethod
def exec_command(self, cmd, tmp_path, in_data=None, executable=None, sudoable=True):
"""Run a command on the remote host"""
pass
@ensure_connect
@abstractmethod
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
@ensure_connect
@abstractmethod
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
@abstractmethod
def close(self):
"""Terminate the connection"""
pass
def check_become_success(self, output):
return self._play_context.success_key in output
def check_password_prompt(self, output):
if self._play_context.prompt is None:
return False
elif isinstance(self._play_context.prompt, basestring):
return output.endswith(self._play_context.prompt)
else:
return self._play_context.prompt(output)
def check_incorrect_password(self, output):
incorrect_password = gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method])
if incorrect_password in output:
raise AnsibleError('Incorrect %s password' % self._play_context.become_method)
def lock_connection(self):
f = self._play_context.connection_lockfd
self._display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f))
fcntl.lockf(f, fcntl.LOCK_EX)
self._display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f))
def unlock_connection(self):
f = self._play_context.connection_lockfd
fcntl.lockf(f, fcntl.LOCK_UN)
self._display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f))
| gpl-3.0 |
prarthitm/edxplatform | openedx/core/djangoapps/embargo/tests/test_middleware.py | 3 | 7481 | """
Tests for EmbargoMiddleware with CountryAccessRules
"""
from mock import patch
from nose.plugins.attrib import attr
import ddt
from django.core.urlresolvers import reverse
from django.conf import settings
from django.core.cache import cache as django_cache
from config_models.models import cache as config_cache
from openedx.core.djangolib.testing.utils import skip_unless_lms
from util.testing import UrlResetMixin
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from ..models import RestrictedCourse, IPFilter
from ..test_utils import restrict_course
@attr(shard=3)
@ddt.ddt
@skip_unless_lms
class EmbargoMiddlewareAccessTests(UrlResetMixin, ModuleStoreTestCase):
"""Tests of embargo middleware country access rules.
There are detailed unit tests for the rule logic in
`test_api.py`; here, we're mainly testing the integration
    with middleware.
"""
USERNAME = 'fred'
PASSWORD = 'secret'
URLCONF_MODULES = ['openedx.core.djangoapps.embargo']
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(EmbargoMiddlewareAccessTests, self).setUp()
self.user = UserFactory(username=self.USERNAME, password=self.PASSWORD)
self.course = CourseFactory.create()
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(self.course.id)}
)
self.non_courseware_url = reverse('dashboard')
# Clear the cache to avoid interference between tests
django_cache.clear()
config_cache.clear()
@patch.dict(settings.FEATURES, {'EMBARGO': True})
@ddt.data(True, False)
def test_blocked(self, disable_access_check):
with restrict_course(self.course.id, access_point='courseware', disable_access_check=disable_access_check) as redirect_url: # pylint: disable=line-too-long
response = self.client.get(self.courseware_url)
if disable_access_check:
self.assertEqual(response.status_code, 200)
else:
self.assertRedirects(response, redirect_url)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_allowed(self):
# Add the course to the list of restricted courses
# but don't create any access rules
RestrictedCourse.objects.create(course_key=self.course.id)
# Expect that we can access courseware
response = self.client.get(self.courseware_url)
self.assertEqual(response.status_code, 200)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_non_courseware_url(self):
with restrict_course(self.course.id):
response = self.client.get(self.non_courseware_url)
self.assertEqual(response.status_code, 200)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
@ddt.data(
# request_ip, blacklist, whitelist, is_enabled, allow_access
('173.194.123.35', ['173.194.123.35'], [], True, False),
('173.194.123.35', ['173.194.0.0/16'], [], True, False),
('173.194.123.35', ['127.0.0.0/32', '173.194.0.0/16'], [], True, False),
('173.195.10.20', ['173.194.0.0/16'], [], True, True),
('173.194.123.35', ['173.194.0.0/16'], ['173.194.0.0/16'], True, False),
('173.194.123.35', [], ['173.194.0.0/16'], True, True),
('192.178.2.3', [], ['173.194.0.0/16'], True, True),
('173.194.123.35', ['173.194.123.35'], [], False, True),
)
@ddt.unpack
def test_ip_access_rules(self, request_ip, blacklist, whitelist, is_enabled, allow_access):
# Ensure that IP blocking works for anonymous users
self.client.logout()
# Set up the IP rules
IPFilter.objects.create(
blacklist=", ".join(blacklist),
whitelist=", ".join(whitelist),
enabled=is_enabled
)
# Check that access is enforced
response = self.client.get(
"/",
HTTP_X_FORWARDED_FOR=request_ip,
REMOTE_ADDR=request_ip
)
if allow_access:
self.assertEqual(response.status_code, 200)
else:
redirect_url = reverse(
'embargo_blocked_message',
kwargs={
'access_point': 'courseware',
'message_key': 'embargo'
}
)
self.assertRedirects(response, redirect_url)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
@ddt.data(
('courseware', 'default'),
('courseware', 'embargo'),
('enrollment', 'default'),
('enrollment', 'embargo')
)
@ddt.unpack
def test_always_allow_access_to_embargo_messages(self, access_point, msg_key):
# Blacklist an IP address
IPFilter.objects.create(
blacklist="192.168.10.20",
enabled=True
)
url = reverse(
'embargo_blocked_message',
kwargs={
'access_point': access_point,
'message_key': msg_key
}
)
response = self.client.get(
url,
HTTP_X_FORWARDED_FOR="192.168.10.20",
REMOTE_ADDR="192.168.10.20"
)
self.assertEqual(response.status_code, 200)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_whitelist_ip_skips_country_access_checks(self):
# Whitelist an IP address
IPFilter.objects.create(
whitelist="192.168.10.20",
enabled=True
)
# Set up country access rules so the user would
# be restricted from the course.
with restrict_course(self.course.id):
# Make a request from the whitelisted IP address
response = self.client.get(
self.courseware_url,
HTTP_X_FORWARDED_FOR="192.168.10.20",
REMOTE_ADDR="192.168.10.20"
)
# Expect that we were still able to access the page,
# even though we would have been blocked by country
# access rules.
self.assertEqual(response.status_code, 200)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_always_allow_course_detail_access(self):
""" Access to the Course Structure API's course detail endpoint should always be granted. """
# Make the user staff so that it has permissions to access the views.
self.user.is_staff = True
self.user.save() # pylint: disable=no-member
# Blacklist an IP address
ip_address = "192.168.10.20"
IPFilter.objects.create(
blacklist=ip_address,
enabled=True
)
url = reverse('course_structure_api:v0:detail', kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(
url,
HTTP_X_FORWARDED_FOR=ip_address,
REMOTE_ADDR=ip_address
)
self.assertEqual(response.status_code, 200)
# Test with a fully-restricted course
with restrict_course(self.course.id):
response = self.client.get(
url,
HTTP_X_FORWARDED_FOR=ip_address,
REMOTE_ADDR=ip_address
)
self.assertEqual(response.status_code, 200)
| agpl-3.0 |
aferr/TemporalPartitioningMemCtl | src/mem/cache/tags/iic_repl/Repl.py | 19 | 1901 | # Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
class Repl(SimObject):
type = 'Repl'
abstract = True
class GenRepl(Repl):
type = 'GenRepl'
fresh_res = Param.Int("Fresh pool residency time")
num_pools = Param.Int("Number of priority pools")
pool_res = Param.Int("Pool residency time")
| bsd-3-clause |
aaron-fz/neutron_full_sync | neutron/tests/unit/nec/test_nec_agent.py | 5 | 15244 | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import itertools
import time
import mock
from oslo.config import cfg
from six import moves
import testtools
from neutron.agent.linux import ovs_lib
from neutron.extensions import securitygroup as ext_sg
from neutron.plugins.nec.agent import nec_neutron_agent
from neutron.tests import base
DAEMON_LOOP_COUNT = 10
OVS_DPID = '00000629355b6943'
OVS_DPID_0X = '0x' + OVS_DPID
class TestNecAgentBase(base.BaseTestCase):
def setUp(self):
super(TestNecAgentBase, self).setUp()
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('rpc_backend',
'neutron.openstack.common.rpc.impl_fake')
cfg.CONF.set_override('host', 'dummy-host')
with contextlib.nested(
mock.patch.object(ovs_lib.OVSBridge, 'get_datapath_id',
return_value=OVS_DPID),
mock.patch('socket.gethostname', return_value='dummy-host'),
mock.patch('neutron.openstack.common.loopingcall.'
'FixedIntervalLoopingCall'),
mock.patch('neutron.agent.rpc.PluginReportStateAPI')
) as (get_datapath_id, gethostname,
loopingcall, state_rpc_api):
kwargs = {'integ_br': 'integ_br',
'root_helper': 'dummy_wrapper',
'polling_interval': 1}
self.agent = nec_neutron_agent.NECNeutronAgent(**kwargs)
self.loopingcall = loopingcall
self.state_rpc_api = state_rpc_api
class TestNecAgent(TestNecAgentBase):
def _setup_mock(self):
vif_ports = [ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1',
self.agent.int_br),
ovs_lib.VifPort('port2', '2', 'id-2', 'mac-2',
self.agent.int_br)]
self.get_vif_ports = mock.patch.object(
ovs_lib.OVSBridge, 'get_vif_ports',
return_value=vif_ports).start()
self.update_ports = mock.patch.object(
nec_neutron_agent.NECPluginApi, 'update_ports').start()
self.prepare_devices_filter = mock.patch.object(
self.agent.sg_agent, 'prepare_devices_filter').start()
self.remove_devices_filter = mock.patch.object(
self.agent.sg_agent, 'remove_devices_filter').start()
def _test_single_loop(self, with_exc=False, need_sync=False):
self.agent.cur_ports = ['id-0', 'id-1']
self.agent.need_sync = need_sync
self.agent.loop_handler()
if with_exc:
self.assertEqual(self.agent.cur_ports, ['id-0', 'id-1'])
self.assertTrue(self.agent.need_sync)
else:
self.assertEqual(self.agent.cur_ports, ['id-1', 'id-2'])
self.assertFalse(self.agent.need_sync)
def test_single_loop_normal(self):
self._setup_mock()
self._test_single_loop()
agent_id = 'nec-q-agent.dummy-host'
self.update_ports.assert_called_once_with(
mock.ANY, agent_id, OVS_DPID_0X,
[{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}],
['id-0'])
self.prepare_devices_filter.assert_called_once_with(['id-2'])
self.remove_devices_filter.assert_called_once_with(['id-0'])
def test_single_loop_need_sync(self):
self._setup_mock()
self._test_single_loop(need_sync=True)
agent_id = 'nec-q-agent.dummy-host'
self.update_ports.assert_called_once_with(
mock.ANY, agent_id, OVS_DPID_0X,
[{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'},
{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}],
[])
self.prepare_devices_filter.assert_called_once_with(['id-1', 'id-2'])
self.assertFalse(self.remove_devices_filter.call_count)
def test_single_loop_with_sg_exception_remove(self):
self._setup_mock()
self.update_ports.side_effect = Exception()
self._test_single_loop(with_exc=True)
def test_single_loop_with_sg_exception_prepare(self):
self._setup_mock()
self.prepare_devices_filter.side_effect = Exception()
self._test_single_loop(with_exc=True)
def test_single_loop_with_update_ports_exception(self):
self._setup_mock()
self.remove_devices_filter.side_effect = Exception()
self._test_single_loop(with_exc=True)
def test_daemon_loop(self):
def state_check(index):
self.assertEqual(len(self.vif_ports_scenario[index]),
len(self.agent.cur_ports))
# Fake time.sleep to stop the infinite loop in daemon_loop()
self.sleep_count = 0
def sleep_mock(*args, **kwargs):
state_check(self.sleep_count)
self.sleep_count += 1
if self.sleep_count >= DAEMON_LOOP_COUNT:
raise RuntimeError()
vif_ports = [ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1',
self.agent.int_br),
ovs_lib.VifPort('port2', '2', 'id-2', 'mac-2',
self.agent.int_br)]
self.vif_ports_scenario = [[], [], vif_ports[0:1], vif_ports[0:2],
vif_ports[1:2], []]
# Ensure vif_ports_scenario is longer than DAEMON_LOOP_COUNT
if len(self.vif_ports_scenario) < DAEMON_LOOP_COUNT:
self.vif_ports_scenario.extend(
[] for _i in moves.xrange(DAEMON_LOOP_COUNT -
len(self.vif_ports_scenario)))
with contextlib.nested(
mock.patch.object(time, 'sleep', side_effect=sleep_mock),
mock.patch.object(ovs_lib.OVSBridge, 'get_vif_ports'),
mock.patch.object(nec_neutron_agent.NECPluginApi, 'update_ports'),
mock.patch.object(self.agent.sg_agent, 'prepare_devices_filter'),
mock.patch.object(self.agent.sg_agent, 'remove_devices_filter')
        ) as (sleep, get_vif_ports, update_ports,
              prepare_devices_filter, remove_devices_filter):
            get_vif_ports.side_effect = self.vif_ports_scenario
with testtools.ExpectedException(RuntimeError):
self.agent.daemon_loop()
self.assertEqual(update_ports.call_count, 4)
self.assertEqual(sleep.call_count, DAEMON_LOOP_COUNT)
agent_id = 'nec-q-agent.dummy-host'
expected = [
mock.call(mock.ANY, agent_id, OVS_DPID_0X,
[{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'}],
[]),
mock.call(mock.ANY, agent_id, OVS_DPID_0X,
[{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}],
[]),
mock.call(mock.ANY, agent_id, OVS_DPID_0X,
[], ['id-1']),
mock.call(mock.ANY, agent_id, OVS_DPID_0X,
[], ['id-2'])
]
update_ports.assert_has_calls(expected)
expected = [mock.call(['id-1']),
mock.call(['id-2'])]
self.assertEqual(prepare_devices_filter.call_count, 2)
prepare_devices_filter.assert_has_calls(expected)
self.assertEqual(remove_devices_filter.call_count, 2)
remove_devices_filter.assert_has_calls(expected)
sleep.assert_called_with(self.agent.polling_interval)
def test_report_state_installed(self):
self.loopingcall.assert_called_once_with(self.agent._report_state)
instance = self.loopingcall.return_value
self.assertTrue(instance.start.called)
def _check_report_state(self, cur_ports, num_ports, fail_mode,
first=False):
self.assertEqual(first or fail_mode,
'start_flag' in self.agent.agent_state)
self.agent.cur_ports = cur_ports
self.agent._report_state()
self.assertEqual(fail_mode,
'start_flag' in self.agent.agent_state)
self.assertEqual(self.agent.
agent_state['configurations']['devices'],
num_ports)
self.num_ports_hist.append(num_ports)
def _test_report_state(self, fail_mode):
log_mocked = mock.patch.object(nec_neutron_agent, 'LOG')
log_patched = log_mocked.start()
def record_state(*args, **kwargs):
self.record_calls.append(copy.deepcopy(args))
if fail_mode:
raise Exception()
self.record_calls = []
self.num_ports_hist = []
state_rpc = self.state_rpc_api.return_value
state_rpc.report_state.side_effect = record_state
dummy_vif = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', None)
self.state_rpc_api.assert_called_once_with('q-plugin')
self.assertIn('start_flag', self.agent.agent_state)
self._check_report_state([], 0, fail_mode, first=True)
self._check_report_state([dummy_vif] * 2, 2, fail_mode)
self._check_report_state([dummy_vif] * 5, 5, fail_mode)
self._check_report_state([], 0, fail_mode)
# Since loopingcall start is mocked, call_count is same as
# the call count of check_report_state.
self.assertEqual(state_rpc.report_state.call_count, 4)
self.assertEqual(len(self.record_calls), 4)
for i, x in enumerate(itertools.izip(self.record_calls,
self.num_ports_hist)):
rec, num_ports = x
expected_state = {
'binary': 'neutron-nec-agent',
'host': 'dummy-host',
'topic': 'N/A',
'configurations': {'devices': 0},
'agent_type': 'NEC plugin agent'}
expected_state['configurations']['devices'] = num_ports
if i == 0 or fail_mode:
expected_state['start_flag'] = True
self.assertEqual(expected_state, rec[1])
self.assertEqual(fail_mode, log_patched.exception.called)
def test_report_state(self):
self._test_report_state(fail_mode=False)
def test_report_state_fail(self):
self._test_report_state(fail_mode=True)
class TestNecAgentCallback(TestNecAgentBase):
def test_port_update(self):
with contextlib.nested(
mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_by_id'),
mock.patch.object(self.agent.sg_agent, 'refresh_firewall')
) as (get_vif_port_by_id, refresh_firewall):
context = mock.Mock()
vifport = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1',
self.agent.int_br)
# The OVS port does not exist.
get_vif_port_by_id.return_value = None
port = {'id': 'update-port-1'}
self.agent.callback_nec.port_update(context, port=port)
self.assertEqual(get_vif_port_by_id.call_count, 1)
self.assertFalse(refresh_firewall.call_count)
# The OVS port exists but no security group is associated.
get_vif_port_by_id.return_value = vifport
port = {'id': 'update-port-1'}
self.agent.callback_nec.port_update(context, port=port)
self.assertEqual(get_vif_port_by_id.call_count, 2)
self.assertFalse(refresh_firewall.call_count)
# The OVS port exists but a security group is associated.
get_vif_port_by_id.return_value = vifport
port = {'id': 'update-port-1',
ext_sg.SECURITYGROUPS: ['default']}
self.agent.callback_nec.port_update(context, port=port)
self.assertEqual(get_vif_port_by_id.call_count, 3)
self.assertEqual(refresh_firewall.call_count, 1)
get_vif_port_by_id.return_value = None
port = {'id': 'update-port-1',
ext_sg.SECURITYGROUPS: ['default']}
self.agent.callback_nec.port_update(context, port=port)
self.assertEqual(get_vif_port_by_id.call_count, 4)
self.assertEqual(refresh_firewall.call_count, 1)
class TestNecAgentPluginApi(TestNecAgentBase):
def _test_plugin_api(self, expected_failure=False):
with contextlib.nested(
mock.patch.object(nec_neutron_agent.NECPluginApi, 'make_msg'),
mock.patch.object(nec_neutron_agent.NECPluginApi, 'call'),
mock.patch.object(nec_neutron_agent, 'LOG')
) as (make_msg, apicall, log):
agent_id = 'nec-q-agent.dummy-host'
if expected_failure:
apicall.side_effect = Exception()
self.agent.plugin_rpc.update_ports(
mock.sentinel.ctx, agent_id, OVS_DPID_0X,
# port_added
[{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'},
{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}],
# port_removed
['id-3', 'id-4', 'id-5'])
make_msg.assert_called_once_with(
'update_ports', topic='q-agent-notifier',
agent_id=agent_id, datapath_id=OVS_DPID_0X,
port_added=[{'id': 'id-1', 'mac': 'mac-1', 'port_no': '1'},
{'id': 'id-2', 'mac': 'mac-2', 'port_no': '2'}],
port_removed=['id-3', 'id-4', 'id-5'])
apicall.assert_called_once_with(mock.sentinel.ctx,
make_msg.return_value)
self.assertTrue(log.info.called)
if expected_failure:
self.assertTrue(log.warn.called)
def test_plugin_api(self):
self._test_plugin_api()
class TestNecAgentMain(base.BaseTestCase):
def test_main(self):
with contextlib.nested(
mock.patch.object(nec_neutron_agent, 'NECNeutronAgent'),
mock.patch.object(nec_neutron_agent, 'common_config'),
mock.patch.object(nec_neutron_agent, 'config')
) as (agent, common_config, cfg):
cfg.OVS.integration_bridge = 'br-int-x'
cfg.AGENT.root_helper = 'dummy-helper'
cfg.AGENT.polling_interval = 10
nec_neutron_agent.main()
self.assertTrue(common_config.setup_logging.called)
agent.assert_has_calls([
mock.call('br-int-x', 'dummy-helper', 10),
mock.call().daemon_loop()
])
| apache-2.0 |
sorenk/ansible | lib/ansible/modules/cloud/openstack/os_ironic_inspect.py | 15 | 4998 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_ironic_inspect
short_description: Explicitly triggers baremetal node introspection in ironic.
extends_documentation_fragment: openstack
author: "Julia Kreger (@juliakreger)"
version_added: "2.1"
description:
- Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
This command may be out of band or in-band depending on the ironic driver configuration.
This is only possible on nodes in 'manageable' and 'available' state.
options:
mac:
description:
- unique mac address that is used to attempt to identify the host.
uuid:
description:
- globally unique identifier (UUID) to identify the host.
name:
description:
- unique name identifier to identify the host in Ironic.
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
Use with "auth" and "auth_type" settings set to None.
timeout:
description:
- A timeout in seconds to tell the role to wait for the node to complete introspection if wait is set to True.
default: 1200
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["shade"]
'''
RETURN = '''
ansible_facts:
    description: Dictionary of new facts representing discovered properties of the node.
returned: changed
type: complex
contains:
memory_mb:
description: Amount of node memory as updated in the node properties
type: string
sample: "1024"
cpu_arch:
description: Detected CPU architecture type
type: string
sample: "x86_64"
local_gb:
        description: Total size of local disk storage as updated in node properties.
type: string
sample: "10"
cpus:
description: Count of cpu cores defined in the updated node properties.
type: string
sample: "1"
'''
EXAMPLES = '''
# Invoke node inspection
- os_ironic_inspect:
name: "testnode1"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
def main():
argument_spec = openstack_full_argument_spec(
auth_type=dict(required=False),
uuid=dict(required=False),
name=dict(required=False),
mac=dict(required=False),
ironic_url=dict(required=False),
timeout=dict(default=1200, type='int', required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears to be disabled, "
"Please define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
shade, cloud = openstack_cloud_from_module(
module, min_version='1.0.0')
try:
if module.params['name'] or module.params['uuid']:
server = cloud.get_machine(_choose_id_value(module))
elif module.params['mac']:
server = cloud.get_machine_by_mac(module.params['mac'])
else:
module.fail_json(msg="The worlds did not align, "
"the host was not found as "
"no name, uuid, or mac was "
"defined.")
if server:
cloud.inspect_machine(server['uuid'], module.params['wait'])
# TODO(TheJulia): diff properties, ?and ports? and determine
# if a change occurred. In theory, the node is always changed
# if introspection is able to update the record.
module.exit_json(changed=True,
ansible_facts=server['properties'])
else:
module.fail_json(msg="node not found.")
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == "__main__":
main()
| gpl-3.0 |
lanius/hunk | hunk/production.py | 1 | 1206 | # -*- coding: utf-8 -*-
"""
hunk.production
~~~~~~~~~~~~~~~
Provides a class to configure production environment.
"""
import importlib
import os
import sys
from ._compat import urljoin, urlunsplit
class ProductionEnvironment(object):
"""Holds information for a production environment to dispatch to it."""
def __init__(self):
self.routes = set()
self.scheme = 'http'
self.hostname = 'localhost'
self.port = 9000
def load(self, dirpath, filename):
filepath = os.path.join(dirpath, filename)
if not os.path.exists(filepath):
return # skipped
modname, _ = os.path.splitext(filename)
sys.path.append(dirpath)
config = importlib.import_module(modname)
for attr in ['scheme', 'hostname', 'port']:
if hasattr(config, attr):
setattr(self, attr, getattr(config, attr))
if hasattr(config, 'routes'):
self.routes.update(config.routes)
def build_url(self, path):
base_url = urlunsplit((
self.scheme,
':'.join([self.hostname, str(self.port)]),
'', '', ''
))
return urljoin(base_url, path)
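# Illustrative usage sketch: the directory, config file name and URL values
# below are assumptions, not part of this package.
#
#   env = ProductionEnvironment()
#   env.load('/srv/myproject/config', 'production_config.py')
#   env.build_url('/api/items')
#   # -> 'http://localhost:9000/api/items' when the defaults from __init__ are kept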
| mit |
jackru/pybrain | docs/tutorials/blackboxoptimization.py | 25 | 4601 | from __future__ import print_function
############################################################################
# PyBrain Tutorial "Black Box Optimization"
#
# Author: Tom Schaul, [email protected]
############################################################################
__author__ = 'Tom Schaul, [email protected]'
""" A script that attempts to illustrate a large variety of
use-cases for different kinds of black-box learning algorithms.
"""
from pybrain.structure.networks.network import Network
from pybrain.optimization import * #@UnusedWildImport
""" The problem we would like to solve can be anything that
has something like a fitness function.
The following switches between two different examples.
The variable 'theparams' contains the trainable
parameters that affect the fitness. """
if False:
""" Simple function optimization:
here the parameters are learned directly. """
from scipy import randn
from pybrain.rl.environments.functions import SphereFunction
thetask = SphereFunction(3)
theparams = randn(3)
else:
""" Simple pole-balancing task:
here we learn the weights of a neural network controller."""
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole.balancetask import BalanceTask
thetask = BalanceTask()
theparams = buildNetwork(thetask.outdim, thetask.indim, bias=False)
print('Subsequently, we attempt to solve the following task:')
print(thetask.__class__.__name__)
if isinstance(theparams, Network):
print('\nby finding good weights for this (simple) network:')
print(theparams)
print('\nwhich has', theparams.paramdim, 'trainable parameters. (the dimensions of its layers are:', end=' ')
for m in theparams.modules:
print(m.indim, ',', end=' ')
print(')\n')
""" We allow every algorithm a limited number of evaluations. """
maxEvals = 1000
""" Standard function minimization: """
print('fmin', NelderMead(thetask, theparams, maxEvaluations=maxEvals).learn())
""" The same, using some other algorithms
(note that the syntax for invoking them is always the same) """
print('CMA', CMAES(thetask, theparams, maxEvaluations=maxEvals).learn())
print('NES', ExactNES(thetask, theparams, maxEvaluations=maxEvals).learn())
print('FEM', FEM(thetask, theparams, maxEvaluations=maxEvals).learn())
print('Finite Differences', FiniteDifferences(thetask, theparams, maxEvaluations=maxEvals).learn())
print('SPSA', SimpleSPSA(thetask, theparams, maxEvaluations=maxEvals).learn())
print('PGPE', PGPE(thetask, theparams, maxEvaluations=maxEvals).learn())
""" Evolutionary methods fall in the Learner framework as well.
All the following are examples."""
print('HillClimber', HillClimber(thetask, theparams, maxEvaluations=maxEvals).learn())
print('WeightGuessing', WeightGuessing(thetask, theparams, maxEvaluations=maxEvals).learn())
print('ES 50+50', ES(thetask, theparams, maxEvaluations=maxEvals).learn())
""" We can change some default parameters, e.g."""
print('ES 5+5', ES(thetask, theparams, mu=5, lambada=5, maxEvaluations=maxEvals).learn())
""" Memetic algorithms are a kind of meta-algorithm, doing topology mutations
on the top-level, and using other algorithms internally
as a kind of local search (default there: hill-climbing)."""
print('Memetic Climber', MemeticSearch(thetask, theparams, maxEvaluations=maxEvals).learn())
print('Memetic ES 50+50', MemeticSearch(thetask, theparams, maxEvaluations=maxEvals,
localSearch=ES, localSteps=200).learn())
print('Memetic ES 5+5', MemeticSearch(thetask, theparams, maxEvaluations=maxEvals,
localSearch=ES,
localSearchArgs={'mu': 5, 'lambada': 5}).learn())
print('Memetic NES', MemeticSearch(thetask, theparams, maxEvaluations=maxEvals,
localSearch=ExactNES,
localSearchArgs={'batchSize': 20}).learn())
""" Inner memetic is the population based variant (on the topology level). """
print('Inner Memetic Climber', InnerMemeticSearch(thetask, theparams, maxEvaluations=maxEvals).learn())
print('Inner Memetic CMA', InnerMemeticSearch(thetask, theparams, maxEvaluations=maxEvals,
localSearch=CMAES).learn())
""" Inverse memetic algorithms do local search on topology mutations,
and weight changes in the top-level search. """
print('Inverse Memetic Climber', InverseMemeticSearch(thetask, theparams, maxEvaluations=maxEvals).learn())
| bsd-3-clause |
sam-tsai/django | tests/max_lengths/tests.py | 380 | 1589 | from __future__ import unicode_literals
import unittest
from .models import PersonWithCustomMaxLengths, PersonWithDefaultMaxLengths
class MaxLengthArgumentsTests(unittest.TestCase):
def verify_max_length(self, model, field, length):
self.assertEqual(model._meta.get_field(field).max_length, length)
def test_default_max_lengths(self):
self.verify_max_length(PersonWithDefaultMaxLengths, 'email', 254)
self.verify_max_length(PersonWithDefaultMaxLengths, 'vcard', 100)
self.verify_max_length(PersonWithDefaultMaxLengths, 'homepage', 200)
self.verify_max_length(PersonWithDefaultMaxLengths, 'avatar', 100)
def test_custom_max_lengths(self):
self.verify_max_length(PersonWithCustomMaxLengths, 'email', 250)
self.verify_max_length(PersonWithCustomMaxLengths, 'vcard', 250)
self.verify_max_length(PersonWithCustomMaxLengths, 'homepage', 250)
self.verify_max_length(PersonWithCustomMaxLengths, 'avatar', 250)
class MaxLengthORMTests(unittest.TestCase):
def test_custom_max_lengths(self):
args = {
"email": "[email protected]",
"vcard": "vcard",
"homepage": "http://example.com/",
"avatar": "me.jpg"
}
for field in ("email", "vcard", "homepage", "avatar"):
new_args = args.copy()
new_args[field] = "X" * 250 # a value longer than any of the default fields could hold.
p = PersonWithCustomMaxLengths.objects.create(**new_args)
self.assertEqual(getattr(p, field), ("X" * 250))
| bsd-3-clause |
gnuhub/intellij-community | python/helpers/pydev/stubs/_django_manager_body.py | 102 | 8951 | # This is a dummy for code-completion purposes.
def __unicode__(self):
"""
Return "app_label.model_label.manager_name".
"""
def _copy_to_model(self, model):
"""
Makes a copy of the manager and assigns it to 'model', which should be
a child of the existing model (used when inheriting a manager from an
abstract base class).
"""
def _db(self):
"""
"""
def _get_queryset_methods(cls, queryset_class):
"""
"""
def _hints(self):
"""
dict() -> new empty dictionary
dict(mapping) -> new dictionary initialized from a mapping object's
(key, value) pairs
dict(iterable) -> new dictionary initialized as if via:
d = {}
for k, v in iterable:
d[k] = v
dict(**kwargs) -> new dictionary initialized with the name=value pairs
in the keyword argument list. For example: dict(one=1, two=2)
"""
def _inherited(self):
"""
"""
def _insert(self, *args, **kwargs):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
def _queryset_class(self):
"""
Represents a lazy database lookup for a set of objects.
"""
def _set_creation_counter(self):
"""
Sets the creation counter value for this instance and increments the
class-level copy.
"""
def _update(self, *args, **kwargs):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
def all(self):
"""
@rtype: django.db.models.query.QuerySet
"""
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
def bulk_create(self, *args, **kwargs):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field.
"""
def check(self, **kwargs):
"""
"""
def complex_filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
@rtype: django.db.models.query.QuerySet
"""
def contribute_to_class(self, model, name):
"""
"""
def count(self, *args, **kwargs):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
def create(self, *args, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
def creation_counter(self):
"""
"""
def dates(self, *args, **kwargs):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
def datetimes(self, *args, **kwargs):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
def db(self):
"""
"""
def db_manager(self, using=None, hints=None):
"""
"""
def defer(self, *args, **kwargs):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
def distinct(self, *args, **kwargs):
"""
Returns a new QuerySet instance that will select only distinct results.
@rtype: django.db.models.query.QuerySet
"""
def earliest(self, *args, **kwargs):
"""
"""
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
@rtype: django.db.models.query.QuerySet
"""
def exists(self, *args, **kwargs):
"""
"""
def extra(self, *args, **kwargs):
"""
Adds extra SQL fragments to the query.
"""
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
@rtype: django.db.models.query.QuerySet
"""
def first(self, *args, **kwargs):
"""
Returns the first object of a query, returns None if no match is found.
"""
def from_queryset(cls, queryset_class, class_name=None):
"""
"""
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
def get_or_create(self, *args, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
def get_queryset(self):
"""
Returns a new QuerySet object. Subclasses can override this method to
easily customize the behavior of the Manager.
@rtype: django.db.models.query.QuerySet
"""
def in_bulk(self, *args, **kwargs):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
def iterator(self, *args, **kwargs):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
def last(self, *args, **kwargs):
"""
Returns the last object of a query, returns None if no match is found.
"""
def latest(self, *args, **kwargs):
"""
"""
def model(self):
"""
MyModel(id)
"""
def none(self, *args, **kwargs):
"""
Returns an empty QuerySet.
@rtype: django.db.models.query.QuerySet
"""
def only(self, *args, **kwargs):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
def order_by(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the ordering changed.
@rtype: django.db.models.query.QuerySet
"""
def prefetch_related(self, *args, **kwargs):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
@rtype: django.db.models.query.QuerySet
"""
def raw(self, *args, **kwargs):
"""
"""
def reverse(self, *args, **kwargs):
"""
Reverses the ordering of the QuerySet.
@rtype: django.db.models.query.QuerySet
"""
def select_for_update(self, *args, **kwargs):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
@rtype: django.db.models.query.QuerySet
"""
def select_related(self, *args, **kwargs):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
@rtype: django.db.models.query.QuerySet
"""
def update(self, *args, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
def update_or_create(self, *args, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
def using(self, *args, **kwargs):
"""
Selects which database this QuerySet should execute its query against.
@rtype: django.db.models.query.QuerySet
"""
def values(self, *args, **kwargs):
"""
"""
def values_list(self, *args, **kwargs):
"""
"""
| apache-2.0 |
jimi-c/ansible | lib/ansible/plugins/action/assert.py | 6 | 3165 | # Copyright 2012, Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.playbook.conditional import Conditional
from ansible.plugins.action import ActionBase
from ansible.module_utils.six import string_types
class ActionModule(ActionBase):
''' Fail with custom message '''
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if 'that' not in self._task.args:
raise AnsibleError('conditional required in "that" string')
fail_msg = None
success_msg = None
fail_msg = self._task.args.get('fail_msg', self._task.args.get('msg'))
if fail_msg is None:
fail_msg = 'Assertion failed'
elif not isinstance(fail_msg, string_types):
raise AnsibleError('Incorrect type for fail_msg or msg, expected string and got %s' % type(fail_msg))
success_msg = self._task.args.get('success_msg')
if success_msg is None:
success_msg = 'All assertions passed'
elif not isinstance(success_msg, string_types):
raise AnsibleError('Incorrect type for success_msg, expected string and got %s' % type(success_msg))
# make sure the 'that' items are a list
thats = self._task.args['that']
if not isinstance(thats, list):
thats = [thats]
# Now we iterate over the that items, temporarily assigning them
# to the task's when value so we can evaluate the conditional using
# the built in evaluate function. The when has already been evaluated
# by this point, and is not used again, so we don't care about mangling
# that value now
cond = Conditional(loader=self._loader)
result['_ansible_verbose_always'] = True
for that in thats:
cond.when = [that]
test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars)
if not test_result:
result['failed'] = True
result['evaluated_to'] = test_result
result['assertion'] = that
result['msg'] = fail_msg
return result
result['changed'] = False
result['msg'] = success_msg
return result
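# Illustrative playbook snippet (values are examples only) showing how the
# 'that', 'fail_msg' and 'success_msg' arguments handled above are supplied:
#
#   - assert:
#       that:
#         - ansible_distribution == 'Ubuntu'
#         - ansible_memtotal_mb > 1024
#       fail_msg: "host does not meet the minimum requirements"
#       success_msg: "host meets the minimum requirements"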
| gpl-3.0 |
CongSmile/avocado-vt | virttest/libvirt_xml/devices/panic.py | 11 | 1347 | """
panic device support class(es)
http://libvirt.org/formatdomain.html#elementsPanic
"""
from virttest.libvirt_xml import accessors
from virttest.libvirt_xml.devices import base
class Panic(base.UntypedDeviceBase):
__slots__ = ('model', 'addr_type', 'addr_iobase', 'addr_controller',
'addr_bus', 'addr_port')
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute('model', self, parent_xpath='/',
tag_name="panic", attribute='model')
accessors.XMLAttribute('addr_type', self, parent_xpath='/',
tag_name="address", attribute='type')
accessors.XMLAttribute('addr_iobase', self, parent_xpath='/',
tag_name="address", attribute='iobase')
accessors.XMLAttribute('addr_controller', self, parent_xpath='/',
tag_name="address", attribute='controller')
accessors.XMLAttribute('addr_bus', self, parent_xpath='/',
tag_name="address", attribute='bus')
accessors.XMLAttribute('addr_port', self, parent_xpath='/',
tag_name="address", attribute='port')
super(Panic, self).__init__(device_tag='panic',
virsh_instance=virsh_instance)
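# Illustrative usage sketch: the values below are common libvirt panic device
# settings used purely as an example, not defaults of this class.
#
#   panic = Panic()
#   panic.model = 'isa'
#   panic.addr_type = 'isa'
#   panic.addr_iobase = '0x505'
#   # the populated device can then be appended to a VM XML's device list as usual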
| gpl-2.0 |
humbhenri/Sudoku_Poliglota | python/test_sudoku.py | 1 | 2125 | import unittest
from .sudoku import *
from copy import deepcopy
import io
SUDOKU_SAMPLE = [
[2, 0, 0, 0, 0, 0, 0, 6, 0],
[0, 0, 0, 0, 7, 5, 0, 3, 0],
[0, 4, 8, 0, 9, 0, 1, 0, 0],
[0, 0, 0, 3, 0, 0, 0, 0, 0],
[3, 0, 0, 0, 1, 0, 0, 0, 9],
[0, 0, 0, 0, 0, 8, 0, 0, 0],
[0, 0, 1, 0, 2, 0, 5, 7, 0],
[0, 8, 0, 7, 3, 0, 0, 0, 0],
[0, 9, 0, 0, 0, 0, 0, 0, 4]]
SUDOKU_SAMPLE_LINE = "200000060000075030048090100000300000300010009000008000001020570080730000090000004"
SUDOKU_SAMPLE_FORMATED = """2 0 0 0 0 0 0 6 0
0 0 0 0 7 5 0 3 0
0 4 8 0 9 0 1 0 0
0 0 0 3 0 0 0 0 0
3 0 0 0 1 0 0 0 9
0 0 0 0 0 8 0 0 0
0 0 1 0 2 0 5 7 0
0 8 0 7 3 0 0 0 0
0 9 0 0 0 0 0 0 4
"""
SUDOKU_SOLUTION = [[2, 7, 3, 4, 8, 1, 9, 6, 5],
[9, 1, 6, 2, 7, 5, 4, 3, 8],
[5, 4, 8, 6, 9, 3, 1, 2, 7],
[8, 5, 9, 3, 4, 7, 6, 1, 2],
[3, 6, 7, 5, 1, 2, 8, 4, 9],
[1, 2, 4, 9, 6, 8, 7, 5, 3],
[4, 3, 1, 8, 2, 9, 5, 7, 6],
[6, 8, 5, 7, 3, 4, 2, 9, 1],
[7, 9, 2, 1, 5, 6, 3, 8, 4]]
SUDOKU_SOLUTION_FORMATED = u"""2 7 3 4 8 1 9 6 5
9 1 6 2 7 5 4 3 8
5 4 8 6 9 3 1 2 7
8 5 9 3 4 7 6 1 2
3 6 7 5 1 2 8 4 9
1 2 4 9 6 8 7 5 3
4 3 1 8 2 9 5 7 6
6 8 5 7 3 4 2 9 1
7 9 2 1 5 6 3 8 4"""
class TestSudoku(unittest.TestCase):
def test_from_str(self):
self.assertEqual(SUDOKU_SAMPLE, from_str(SUDOKU_SAMPLE_LINE))
def test_to_str(self):
self.assertEqual(SUDOKU_SAMPLE_FORMATED.strip(), to_str(SUDOKU_SAMPLE).strip())
def test_solve(self):
self.assertEqual(SUDOKU_SOLUTION, solve(deepcopy(SUDOKU_SAMPLE)))
def test_process(self):
input = io.StringIO()
input.write(SUDOKU_SAMPLE_LINE)
input.seek(0)
output = io.StringIO()
process(input, output)
results = output.getvalue()
input.close()
output.close()
self.assertEqual(SUDOKU_SOLUTION_FORMATED.strip(), results.strip())
if __name__ == '__main__':
unittest.main()
| mit |
igou/rt-thread | tools/building.py | 1 | 31441 | #
# File : building.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
# 2015-07-25 Bernard Add LOCAL_CCFLAGS/LOCAL_CPPPATH/LOCAL_CPPDEFINES for
# group definition.
#
import os
import sys
import string
import utils
from SCons.Script import *
from utils import _make_path_relative
from mkdist import do_copy_file
BuildOptions = {}
Projects = []
Rtt_Root = ''
Env = None
# SCons PreProcessor patch
def start_handling_includes(self, t=None):
"""
Causes the PreProcessor object to start processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates True, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated
False.
"""
d = self.dispatch_table
p = self.stack[-1] if self.stack else self.default_table
for k in ('import', 'include', 'include_next', 'define'):
d[k] = p[k]
def stop_handling_includes(self, t=None):
"""
Causes the PreProcessor object to stop processing #import,
#include and #include_next lines.
This method will be called when a #if, #ifdef, #ifndef or #elif
evaluates False, or when we reach the #else in a #if, #ifdef,
#ifndef or #elif block where a condition already evaluated True.
"""
d = self.dispatch_table
d['import'] = self.do_nothing
d['include'] = self.do_nothing
d['include_next'] = self.do_nothing
d['define'] = self.do_nothing
PatchedPreProcessor = SCons.cpp.PreProcessor
PatchedPreProcessor.start_handling_includes = start_handling_includes
PatchedPreProcessor.stop_handling_includes = stop_handling_includes
class Win32Spawn:
def spawn(self, sh, escape, cmd, args, env):
# deal with the cmd build-in commands which cannot be used in
# subprocess.Popen
if cmd == 'del':
for f in args[1:]:
try:
os.remove(f)
except Exception as e:
                    print ('Error removing file: ' + str(e))
return -1
return 0
import subprocess
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
# Make sure the env is constructed by strings
_e = dict([(k, str(v)) for k, v in env.items()])
# Windows(tm) CreateProcess does not use the env passed to it to find
# the executables. So we have to modify our own PATH to make Popen
# work.
old_path = os.environ['PATH']
os.environ['PATH'] = _e['PATH']
try:
proc = subprocess.Popen(cmdline, env=_e, shell=False)
except Exception as e:
print ('Error in calling:\n' + cmdline)
            print ('Exception: ' + str(e) + ': ' + os.strerror(e.errno))
return e.errno
finally:
os.environ['PATH'] = old_path
return proc.wait()
# generate cconfig.h file
def GenCconfigFile(env, BuildOptions):
import rtconfig
if rtconfig.PLATFORM == 'gcc':
contents = ''
if not os.path.isfile('cconfig.h'):
import gcc
gcc.GenerateGCCConfig(rtconfig)
# try again
if os.path.isfile('cconfig.h'):
f = open('cconfig.h', 'r')
if f:
contents = f.read()
                f.close()
prep = PatchedPreProcessor()
prep.process_contents(contents)
options = prep.cpp_namespace
BuildOptions.update(options)
# add HAVE_CCONFIG_H definition
env.AppendUnique(CPPDEFINES = ['HAVE_CCONFIG_H'])
def PrepareBuilding(env, root_directory, has_libcpu=False, remove_components = []):
import rtconfig
global BuildOptions
global Projects
global Env
global Rtt_Root
# ===== Add option to SCons =====
AddOption('--dist',
dest = 'make-dist',
action = 'store_true',
default = False,
help = 'make distribution')
AddOption('--dist-strip',
dest = 'make-dist-strip',
action = 'store_true',
default = False,
help = 'make distribution and strip useless files')
AddOption('--cscope',
dest = 'cscope',
action = 'store_true',
default = False,
help = 'Build Cscope cross reference database. Requires cscope installed.')
AddOption('--clang-analyzer',
dest = 'clang-analyzer',
action = 'store_true',
default = False,
help = 'Perform static analyze with Clang-analyzer. ' + \
'Requires Clang installed.\n' + \
'It is recommended to use with scan-build like this:\n' + \
'`scan-build scons --clang-analyzer`\n' + \
'If things goes well, scan-build will instruct you to invoke scan-view.')
AddOption('--buildlib',
dest = 'buildlib',
type = 'string',
help = 'building library of a component')
AddOption('--cleanlib',
dest = 'cleanlib',
action = 'store_true',
default = False,
help = 'clean up the library by --buildlib')
AddOption('--target',
dest = 'target',
type = 'string',
help = 'set target project: mdk/mdk4/mdk5/iar/vs/vsc/ua/cdk/ses')
AddOption('--genconfig',
dest = 'genconfig',
action = 'store_true',
default = False,
help = 'Generate .config from rtconfig.h')
AddOption('--useconfig',
dest = 'useconfig',
type = 'string',
help = 'make rtconfig.h from config file.')
AddOption('--verbose',
dest = 'verbose',
action = 'store_true',
default = False,
help = 'print verbose information during build')
Env = env
Rtt_Root = os.path.abspath(root_directory)
# make an absolute root directory
RTT_ROOT = Rtt_Root
Export('RTT_ROOT')
# set RTT_ROOT in ENV
Env['RTT_ROOT'] = Rtt_Root
# set BSP_ROOT in ENV
Env['BSP_ROOT'] = Dir('#').abspath
sys.path = sys.path + [os.path.join(Rtt_Root, 'tools')]
# {target_name:(CROSS_TOOL, PLATFORM)}
tgt_dict = {'mdk':('keil', 'armcc'),
'mdk4':('keil', 'armcc'),
'mdk5':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl'),
'vs2012':('msvc', 'cl'),
'vsc' : ('gcc', 'gcc'),
'cb':('keil', 'armcc'),
'ua':('gcc', 'gcc'),
'cdk':('gcc', 'gcc'),
'ses' : ('gcc', 'gcc')}
tgt_name = GetOption('target')
if tgt_name:
# --target will change the toolchain settings which clang-analyzer is
# depend on
if GetOption('clang-analyzer'):
print ('--clang-analyzer cannot be used with --target')
sys.exit(1)
SetOption('no_exec', 1)
try:
rtconfig.CROSS_TOOL, rtconfig.PLATFORM = tgt_dict[tgt_name]
            # replace 'RTT_CC' with the 'CROSS_TOOL' selected by --target
os.environ['RTT_CC'] = rtconfig.CROSS_TOOL
utils.ReloadModule(rtconfig)
except KeyError:
            print ('Unknown target: ' + tgt_name + '. Available targets: ' + ', '.join(tgt_dict.keys()))
sys.exit(1)
elif (GetDepend('RT_USING_NEWLIB') == False and GetDepend('RT_USING_NOLIBC') == False) \
and rtconfig.PLATFORM == 'gcc':
AddDepend('RT_USING_MINILIBC')
# auto change the 'RTT_EXEC_PATH' when 'rtconfig.EXEC_PATH' get failed
if not os.path.exists(rtconfig.EXEC_PATH):
if 'RTT_EXEC_PATH' in os.environ:
# del the 'RTT_EXEC_PATH' and using the 'EXEC_PATH' setting on rtconfig.py
del os.environ['RTT_EXEC_PATH']
utils.ReloadModule(rtconfig)
    # add compatibility with Keil MDK 4.6 which changes the directory of armcc.exe
if rtconfig.PLATFORM == 'armcc':
if not os.path.isfile(os.path.join(rtconfig.EXEC_PATH, 'armcc.exe')):
if rtconfig.EXEC_PATH.find('bin40') > 0:
rtconfig.EXEC_PATH = rtconfig.EXEC_PATH.replace('bin40', 'armcc/bin')
Env['LINKFLAGS'] = Env['LINKFLAGS'].replace('RV31', 'armcc')
# reset AR command flags
env['ARCOM'] = '$AR --create $TARGET $SOURCES'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['LIBLINKPREFIX'] = ''
env['LIBLINKSUFFIX'] = '.lib'
env['LIBDIRPREFIX'] = '--userlibpath '
# patch for win32 spawn
if env['PLATFORM'] == 'win32':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
if env['PLATFORM'] == 'win32':
os.environ['PATH'] = rtconfig.EXEC_PATH + ";" + os.environ['PATH']
else:
os.environ['PATH'] = rtconfig.EXEC_PATH + ":" + os.environ['PATH']
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
# add rtconfig.h path
env.Append(CPPPATH = [str(Dir('#').abspath)])
# add library build action
act = SCons.Action.Action(BuildLibInstallAction, 'Install compiled library... $TARGET')
bld = Builder(action = act)
Env.Append(BUILDERS = {'BuildLib': bld})
# parse rtconfig.h to get used component
PreProcessor = PatchedPreProcessor()
f = open('rtconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
if GetOption('clang-analyzer'):
# perform what scan-build does
env.Replace(
CC = 'ccc-analyzer',
CXX = 'c++-analyzer',
# skip as and link
LINK = 'true',
AS = 'true',)
env["ENV"].update(x for x in os.environ.items() if x[0].startswith("CCC_"))
# only check, don't compile. ccc-analyzer use CCC_CC as the CC.
# fsyntax-only will give us some additional warning messages
env['ENV']['CCC_CC'] = 'clang'
env.Append(CFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
env['ENV']['CCC_CXX'] = 'clang++'
env.Append(CXXFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding'])
# remove the POST_ACTION as it will cause meaningless errors(file not
# found or something like that).
rtconfig.POST_ACTION = ''
# generate cconfig.h file
GenCconfigFile(env, BuildOptions)
# auto append '_REENT_SMALL' when using newlib 'nano.specs' option
if rtconfig.PLATFORM == 'gcc' and str(env['LINKFLAGS']).find('nano.specs') != -1:
env.AppendUnique(CPPDEFINES = ['_REENT_SMALL'])
if GetOption('genconfig'):
from genconf import genconfig
genconfig()
exit(0)
if env['PLATFORM'] != 'win32':
AddOption('--menuconfig',
dest = 'menuconfig',
action = 'store_true',
default = False,
help = 'make menuconfig for RT-Thread BSP')
if GetOption('menuconfig'):
from menuconfig import menuconfig
menuconfig(Rtt_Root)
exit(0)
AddOption('--pyconfig',
dest = 'pyconfig',
action = 'store_true',
default = False,
help = 'make menuconfig for RT-Thread BSP')
if GetOption('pyconfig'):
from menuconfig import pyconfig
pyconfig(Rtt_Root)
exit(0)
configfn = GetOption('useconfig')
if configfn:
from menuconfig import mk_rtconfig
mk_rtconfig(configfn)
exit(0)
if not GetOption('verbose'):
# override the default verbose command string
env.Replace(
ARCOMSTR = 'AR $TARGET',
ASCOMSTR = 'AS $TARGET',
ASPPCOMSTR = 'AS $TARGET',
CCCOMSTR = 'CC $TARGET',
CXXCOMSTR = 'CXX $TARGET',
LINKCOMSTR = 'LINK $TARGET'
)
# fix the linker for C++
if GetDepend('RT_USING_CPLUSPLUS'):
if env['LINK'].find('gcc') != -1:
env['LINK'] = env['LINK'].replace('gcc', 'g++')
    # we need to separate the variant_dir for BSPs and the kernels. BSPs could
# have their own components etc. If they point to the same folder, SCons
# would find the wrong source code to compile.
bsp_vdir = 'build'
kernel_vdir = 'build/kernel'
# board build script
objs = SConscript('SConscript', variant_dir=bsp_vdir, duplicate=0)
# include kernel
objs.extend(SConscript(Rtt_Root + '/src/SConscript', variant_dir=kernel_vdir + '/src', duplicate=0))
# include libcpu
if not has_libcpu:
objs.extend(SConscript(Rtt_Root + '/libcpu/SConscript',
variant_dir=kernel_vdir + '/libcpu', duplicate=0))
# include components
objs.extend(SConscript(Rtt_Root + '/components/SConscript',
variant_dir=kernel_vdir + '/components',
duplicate=0,
exports='remove_components'))
return objs
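# Illustrative SConstruct usage sketch (toolchain setup elided; variable names
# are examples only). The returned object list is typically handed on to the
# final program/link step:
#
#   env = Environment(AS=rtconfig.AS, CC=rtconfig.CC, LINK=rtconfig.LINK)
#   objs = PrepareBuilding(env, RTT_ROOT, has_libcpu=False)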
def PrepareModuleBuilding(env, root_directory, bsp_directory):
import rtconfig
global BuildOptions
global Env
global Rtt_Root
# patch for win32 spawn
if env['PLATFORM'] == 'win32':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
Env = env
Rtt_Root = root_directory
# parse bsp rtconfig.h to get used component
PreProcessor = PatchedPreProcessor()
f = open(bsp_directory + '/rtconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
# add build/clean library option for library checking
AddOption('--buildlib',
dest='buildlib',
type='string',
help='building library of a component')
AddOption('--cleanlib',
dest='cleanlib',
action='store_true',
default=False,
help='clean up the library by --buildlib')
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
def GetConfigValue(name):
assert type(name) == str, 'GetConfigValue: only string parameter is valid'
try:
return BuildOptions[name]
except:
return ''
def GetDepend(depend):
building = True
if type(depend) == type('str'):
if not depend in BuildOptions or BuildOptions[depend] == 0:
building = False
elif BuildOptions[depend] != '':
return BuildOptions[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not item in BuildOptions or BuildOptions[item] == 0:
building = False
return building
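# Illustrative SConscript usage of GetDepend (option and file names are
# examples only):
#
#   src = Glob('*.c')
#   if GetDepend(['RT_USING_SERIAL']):
#       src += ['drv_usart.c']
#   if GetDepend('RT_USING_PIN'):
#       src += ['drv_gpio.c']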
def LocalOptions(config_filename):
from SCons.Script import SCons
# parse wiced_config.h to get used component
PreProcessor = SCons.cpp.PreProcessor()
f = open(config_filename, 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
local_options = PreProcessor.cpp_namespace
return local_options
def GetLocalDepend(options, depend):
building = True
if type(depend) == type('str'):
if not depend in options or options[depend] == 0:
building = False
elif options[depend] != '':
return options[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not item in options or options[item] == 0:
building = False
return building
def AddDepend(option):
BuildOptions[option] = 1
def MergeGroup(src_group, group):
src_group['src'] = src_group['src'] + group['src']
if 'CCFLAGS' in group:
if 'CCFLAGS' in src_group:
src_group['CCFLAGS'] = src_group['CCFLAGS'] + group['CCFLAGS']
else:
src_group['CCFLAGS'] = group['CCFLAGS']
if 'CPPPATH' in group:
if 'CPPPATH' in src_group:
src_group['CPPPATH'] = src_group['CPPPATH'] + group['CPPPATH']
else:
src_group['CPPPATH'] = group['CPPPATH']
if 'CPPDEFINES' in group:
if 'CPPDEFINES' in src_group:
src_group['CPPDEFINES'] = src_group['CPPDEFINES'] + group['CPPDEFINES']
else:
src_group['CPPDEFINES'] = group['CPPDEFINES']
if 'ASFLAGS' in group:
if 'ASFLAGS' in src_group:
src_group['ASFLAGS'] = src_group['ASFLAGS'] + group['ASFLAGS']
else:
src_group['ASFLAGS'] = group['ASFLAGS']
# for local CCFLAGS/CPPPATH/CPPDEFINES
if 'LOCAL_CCFLAGS' in group:
if 'LOCAL_CCFLAGS' in src_group:
src_group['LOCAL_CCFLAGS'] = src_group['LOCAL_CCFLAGS'] + group['LOCAL_CCFLAGS']
else:
src_group['LOCAL_CCFLAGS'] = group['LOCAL_CCFLAGS']
if 'LOCAL_CPPPATH' in group:
if 'LOCAL_CPPPATH' in src_group:
src_group['LOCAL_CPPPATH'] = src_group['LOCAL_CPPPATH'] + group['LOCAL_CPPPATH']
else:
src_group['LOCAL_CPPPATH'] = group['LOCAL_CPPPATH']
if 'LOCAL_CPPDEFINES' in group:
if 'LOCAL_CPPDEFINES' in src_group:
src_group['LOCAL_CPPDEFINES'] = src_group['LOCAL_CPPDEFINES'] + group['LOCAL_CPPDEFINES']
else:
src_group['LOCAL_CPPDEFINES'] = group['LOCAL_CPPDEFINES']
if 'LINKFLAGS' in group:
if 'LINKFLAGS' in src_group:
src_group['LINKFLAGS'] = src_group['LINKFLAGS'] + group['LINKFLAGS']
else:
src_group['LINKFLAGS'] = group['LINKFLAGS']
if 'LIBS' in group:
if 'LIBS' in src_group:
src_group['LIBS'] = src_group['LIBS'] + group['LIBS']
else:
src_group['LIBS'] = group['LIBS']
if 'LIBPATH' in group:
if 'LIBPATH' in src_group:
src_group['LIBPATH'] = src_group['LIBPATH'] + group['LIBPATH']
else:
src_group['LIBPATH'] = group['LIBPATH']
if 'LOCAL_ASFLAGS' in group:
if 'LOCAL_ASFLAGS' in src_group:
src_group['LOCAL_ASFLAGS'] = src_group['LOCAL_ASFLAGS'] + group['LOCAL_ASFLAGS']
else:
src_group['LOCAL_ASFLAGS'] = group['LOCAL_ASFLAGS']
def DefineGroup(name, src, depend, **parameters):
global Env
if not GetDepend(depend):
return []
# find exist group and get path of group
group_path = ''
for g in Projects:
if g['name'] == name:
group_path = g['path']
if group_path == '':
group_path = GetCurrentDir()
group = parameters
group['name'] = name
group['path'] = group_path
if type(src) == type([]):
group['src'] = File(src)
else:
group['src'] = src
if 'CCFLAGS' in group:
Env.AppendUnique(CCFLAGS = group['CCFLAGS'])
if 'CPPPATH' in group:
paths = []
for item in group['CPPPATH']:
paths.append(os.path.abspath(item))
group['CPPPATH'] = paths
Env.AppendUnique(CPPPATH = group['CPPPATH'])
if 'CPPDEFINES' in group:
Env.AppendUnique(CPPDEFINES = group['CPPDEFINES'])
if 'LINKFLAGS' in group:
Env.AppendUnique(LINKFLAGS = group['LINKFLAGS'])
if 'ASFLAGS' in group:
Env.AppendUnique(ASFLAGS = group['ASFLAGS'])
if 'LOCAL_CPPPATH' in group:
paths = []
for item in group['LOCAL_CPPPATH']:
paths.append(os.path.abspath(item))
group['LOCAL_CPPPATH'] = paths
import rtconfig
if rtconfig.PLATFORM == 'gcc':
if 'CCFLAGS' in group:
group['CCFLAGS'] = utils.GCCC99Patch(group['CCFLAGS'])
if 'LOCAL_CCFLAGS' in group:
group['LOCAL_CCFLAGS'] = utils.GCCC99Patch(group['LOCAL_CCFLAGS'])
# check whether to clean up library
if GetOption('cleanlib') and os.path.exists(os.path.join(group['path'], GroupLibFullName(name, Env))):
if group['src'] != []:
print ('Remove library:'+ GroupLibFullName(name, Env))
fn = os.path.join(group['path'], GroupLibFullName(name, Env))
if os.path.exists(fn):
os.unlink(fn)
if 'LIBS' in group:
Env.AppendUnique(LIBS = group['LIBS'])
if 'LIBPATH' in group:
Env.AppendUnique(LIBPATH = group['LIBPATH'])
# check whether to build group library
if 'LIBRARY' in group:
objs = Env.Library(name, group['src'])
else:
# only add source
objs = group['src']
# merge group
for g in Projects:
if g['name'] == name:
# merge to this group
MergeGroup(g, group)
return objs
# add a new group
Projects.append(group)
return objs
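# Minimal usage sketch for DefineGroup (not part of the original file; the component
# name, macro and paths are illustrative assumptions). A typical component
# SConscript registers its sources roughly like this:
#
#   from building import *
#
#   cwd = GetCurrentDir()
#   src = Glob('*.c')
#   CPPPATH = [cwd]
#
#   # built only when RT_USING_EXAMPLE is enabled in rtconfig.h
#   objs = DefineGroup('example', src, depend = ['RT_USING_EXAMPLE'], CPPPATH = CPPPATH)
#
#   Return('objs')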
def GetCurrentDir():
conscript = File('SConscript')
fn = conscript.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
return path
PREBUILDING = []
def RegisterPreBuildingAction(act):
global PREBUILDING
assert callable(act), 'Could only register callable objects. %s received' % repr(act)
PREBUILDING.append(act)
def PreBuilding():
global PREBUILDING
for a in PREBUILDING:
a()
def GroupLibName(name, env):
import rtconfig
if rtconfig.PLATFORM == 'armcc':
return name + '_rvds'
elif rtconfig.PLATFORM == 'gcc':
return name + '_gcc'
return name
def GroupLibFullName(name, env):
return env['LIBPREFIX'] + GroupLibName(name, env) + env['LIBSUFFIX']
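# Example (assuming a GNU toolchain where LIBPREFIX is 'lib' and LIBSUFFIX is '.a'):
#   GroupLibFullName('finsh', Env)  ->  'libfinsh_gcc.a'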
def BuildLibInstallAction(target, source, env):
lib_name = GetOption('buildlib')
for Group in Projects:
if Group['name'] == lib_name:
lib_name = GroupLibFullName(Group['name'], env)
dst_name = os.path.join(Group['path'], lib_name)
print ('Copy '+lib_name+' => ' +dst_name)
do_copy_file(lib_name, dst_name)
break
def DoBuilding(target, objects):
# merge all objects into one list
def one_list(l):
lst = []
for item in l:
if type(item) == type([]):
lst += one_list(item)
else:
lst.append(item)
return lst
# handle local group
def local_group(group, objects):
if 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPPATH' in group or 'LOCAL_CPPDEFINES' in group or 'LOCAL_ASFLAGS' in group:
CCFLAGS = Env.get('CCFLAGS', '') + group.get('LOCAL_CCFLAGS', '')
CPPPATH = Env.get('CPPPATH', ['']) + group.get('LOCAL_CPPPATH', [''])
CPPDEFINES = Env.get('CPPDEFINES', ['']) + group.get('LOCAL_CPPDEFINES', [''])
ASFLAGS = Env.get('ASFLAGS', '') + group.get('LOCAL_ASFLAGS', '')
for source in group['src']:
objects.append(Env.Object(source, CCFLAGS = CCFLAGS, ASFLAGS = ASFLAGS,
CPPPATH = CPPPATH, CPPDEFINES = CPPDEFINES))
return True
return False
objects = one_list(objects)
program = None
# check whether special buildlib option
lib_name = GetOption('buildlib')
if lib_name:
objects = [] # remove all of objects
# build library with special component
for Group in Projects:
if Group['name'] == lib_name:
lib_name = GroupLibName(Group['name'], Env)
if not local_group(Group, objects):
objects = Env.Object(Group['src'])
program = Env.Library(lib_name, objects)
# add library copy action
Env.BuildLib(lib_name, program)
break
else:
# remove source files with local flags setting
for group in Projects:
if 'LOCAL_CCFLAGS' in group or 'LOCAL_CPPPATH' in group or 'LOCAL_CPPDEFINES' in group:
for source in group['src']:
for obj in objects:
if source.abspath == obj.abspath or (len(obj.sources) > 0 and source.abspath == obj.sources[0].abspath):
objects.remove(obj)
# re-add the source files to the objects
for group in Projects:
local_group(group, objects)
program = Env.Program(target, objects)
EndBuilding(target, program)
def GenTargetProject(program = None):
if GetOption('target') == 'mdk':
from keil import MDKProject
from keil import MDK4Project
from keil import MDK5Project
template = os.path.isfile('template.Uv2')
if template:
MDKProject('project.Uv2', Projects)
else:
template = os.path.isfile('template.uvproj')
if template:
MDK4Project('project.uvproj', Projects)
else:
template = os.path.isfile('template.uvprojx')
if template:
MDK5Project('project.uvprojx', Projects)
else:
print ('No template project file found.')
if GetOption('target') == 'mdk4':
from keil import MDK4Project
MDK4Project('project.uvproj', Projects)
if GetOption('target') == 'mdk5':
from keil import MDK5Project
MDK5Project('project.uvprojx', Projects)
if GetOption('target') == 'iar':
from iar import IARProject
IARProject('project.ewp', Projects)
if GetOption('target') == 'vs':
from vs import VSProject
VSProject('project.vcproj', Projects, program)
if GetOption('target') == 'vs2012':
from vs2012 import VS2012Project
VS2012Project('project.vcxproj', Projects, program)
if GetOption('target') == 'cb':
from codeblocks import CBProject
CBProject('project.cbp', Projects, program)
if GetOption('target') == 'ua':
from ua import PrepareUA
PrepareUA(Projects, Rtt_Root, str(Dir('#')))
if GetOption('target') == 'vsc':
from vsc import GenerateVSCode
GenerateVSCode(Env)
if GetOption('target') == 'cdk':
from cdk import CDKProject
CDKProject('project.cdkproj', Projects)
if GetOption('target') == 'ses':
from ses import SESProject
SESProject(Env)
def EndBuilding(target, program = None):
import rtconfig
need_exit = False
Env['target'] = program
Env['project'] = Projects
if hasattr(rtconfig, 'BSP_LIBRARY_TYPE'):
Env['bsp_lib_type'] = rtconfig.BSP_LIBRARY_TYPE
Env.AddPostAction(target, rtconfig.POST_ACTION)
# Add addition clean files
Clean(target, 'cconfig.h')
Clean(target, 'rtua.py')
Clean(target, 'rtua.pyc')
if GetOption('target'):
GenTargetProject(program)
BSP_ROOT = Dir('#').abspath
if GetOption('make-dist') and program != None:
from mkdist import MkDist
MkDist(program, BSP_ROOT, Rtt_Root, Env)
if GetOption('make-dist-strip') and program != None:
from mkdist import MkDist_Strip
MkDist_Strip(program, BSP_ROOT, Rtt_Root, Env)
need_exit = True
if GetOption('cscope'):
from cscope import CscopeDatabase
CscopeDatabase(Projects)
if not GetOption('help') and not GetOption('target'):
if not os.path.exists(rtconfig.EXEC_PATH):
print ("Error: the toolchain path (" + rtconfig.EXEC_PATH + ") is not exist, please check 'EXEC_PATH' in path or rtconfig.py.")
need_exit = True
if need_exit:
exit(0)
def SrcRemove(src, remove):
if not src:
return
src_bak = src[:]
if type(remove) == type('str'):
if os.path.isabs(remove):
remove = os.path.relpath(remove, GetCurrentDir())
remove = os.path.normpath(remove)
for item in src_bak:
if type(item) == type('str'):
item_str = item
else:
item_str = item.rstr()
if os.path.isabs(item_str):
item_str = os.path.relpath(item_str, GetCurrentDir())
item_str = os.path.normpath(item_str)
if item_str == remove:
src.remove(item)
else:
for remove_item in remove:
remove_str = str(remove_item)
if os.path.isabs(remove_str):
remove_str = os.path.relpath(remove_str, GetCurrentDir())
remove_str = os.path.normpath(remove_str)
for item in src_bak:
if type(item) == type('str'):
item_str = item
else:
item_str = item.rstr()
if os.path.isabs(item_str):
item_str = os.path.relpath(item_str, GetCurrentDir())
item_str = os.path.normpath(item_str)
if item_str == remove_str:
src.remove(item)
def GetVersion():
import SCons.cpp
import string
rtdef = os.path.join(Rtt_Root, 'include', 'rtdef.h')
# parse rtdef.h to get RT-Thread version
    preprocessor = PatchedPreProcessor()
    f = open(rtdef, 'r')
    contents = f.read()
    f.close()
    preprocessor.process_contents(contents)
    def_ns = preprocessor.cpp_namespace
version = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_VERSION']))
subversion = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_SUBVERSION']))
if 'RT_REVISION' in def_ns:
revision = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_REVISION']))
return '%d.%d.%d' % (version, subversion, revision)
return '0.%d.%d' % (version, subversion)
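# Illustrative result (assumed rtdef.h contents): with RT_VERSION 3, RT_SUBVERSION 1
# and RT_REVISION 0 defined, GetVersion() returns '3.1.0'.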
def GlobSubDir(sub_dir, ext_name):
import os
import glob
def glob_source(sub_dir, ext_name):
list = os.listdir(sub_dir)
src = glob.glob(os.path.join(sub_dir, ext_name))
for item in list:
full_subdir = os.path.join(sub_dir, item)
if os.path.isdir(full_subdir):
src += glob_source(full_subdir, ext_name)
return src
dst = []
src = glob_source(sub_dir, ext_name)
for item in src:
dst.append(os.path.relpath(item, sub_dir))
return dst
def PackageSConscript(package):
from package import BuildPackage
return BuildPackage(package)
| apache-2.0 |
godfather1103/WeiboRobot | python27/1.0/lib/bsddb/test/test_dbenv.py | 68 | 19274 | import unittest
import os, glob
from test_all import db, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
class DBEnv(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
def tearDown(self):
self.env.close()
del self.env
test_support.rmtree(self.homeDir)
class DBEnv_general(DBEnv) :
def test_get_open_flags(self) :
flags = db.DB_CREATE | db.DB_INIT_MPOOL
self.env.open(self.homeDir, flags)
self.assertEqual(flags, self.env.get_open_flags())
def test_get_open_flags2(self) :
flags = db.DB_CREATE | db.DB_INIT_MPOOL | \
db.DB_INIT_LOCK | db.DB_THREAD
self.env.open(self.homeDir, flags)
self.assertEqual(flags, self.env.get_open_flags())
if db.version() >= (4, 7) :
def test_lk_partitions(self) :
for i in [10, 20, 40] :
self.env.set_lk_partitions(i)
self.assertEqual(i, self.env.get_lk_partitions())
def test_getset_intermediate_dir_mode(self) :
self.assertEqual(None, self.env.get_intermediate_dir_mode())
for mode in ["rwx------", "rw-rw-rw-", "rw-r--r--"] :
self.env.set_intermediate_dir_mode(mode)
self.assertEqual(mode, self.env.get_intermediate_dir_mode())
self.assertRaises(db.DBInvalidArgError,
self.env.set_intermediate_dir_mode, "abcde")
if db.version() >= (4, 6) :
def test_thread(self) :
for i in [16, 100, 1000] :
self.env.set_thread_count(i)
self.assertEqual(i, self.env.get_thread_count())
def test_cache_max(self) :
for size in [64, 128] :
size = size*1024*1024 # Megabytes
self.env.set_cache_max(0, size)
size2 = self.env.get_cache_max()
self.assertEqual(0, size2[0])
self.assertTrue(size <= size2[1])
self.assertTrue(2*size > size2[1])
if db.version() >= (4, 4) :
def test_mutex_stat(self) :
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK)
stat = self.env.mutex_stat()
self.assertTrue("mutex_inuse_max" in stat)
def test_lg_filemode(self) :
for i in [0600, 0660, 0666] :
self.env.set_lg_filemode(i)
self.assertEqual(i, self.env.get_lg_filemode())
def test_mp_max_openfd(self) :
for i in [17, 31, 42] :
self.env.set_mp_max_openfd(i)
self.assertEqual(i, self.env.get_mp_max_openfd())
def test_mp_max_write(self) :
for i in [100, 200, 300] :
for j in [1, 2, 3] :
j *= 1000000
self.env.set_mp_max_write(i, j)
v=self.env.get_mp_max_write()
self.assertEqual((i, j), v)
def test_invalid_txn(self) :
# This environment doesn't support transactions
self.assertRaises(db.DBInvalidArgError, self.env.txn_begin)
def test_mp_mmapsize(self) :
for i in [16, 32, 64] :
i *= 1024*1024
self.env.set_mp_mmapsize(i)
self.assertEqual(i, self.env.get_mp_mmapsize())
def test_tmp_dir(self) :
for i in ["a", "bb", "ccc"] :
self.env.set_tmp_dir(i)
self.assertEqual(i, self.env.get_tmp_dir())
def test_flags(self) :
self.env.set_flags(db.DB_AUTO_COMMIT, 1)
self.assertEqual(db.DB_AUTO_COMMIT, self.env.get_flags())
self.env.set_flags(db.DB_TXN_NOSYNC, 1)
self.assertEqual(db.DB_AUTO_COMMIT | db.DB_TXN_NOSYNC,
self.env.get_flags())
self.env.set_flags(db.DB_AUTO_COMMIT, 0)
self.assertEqual(db.DB_TXN_NOSYNC, self.env.get_flags())
self.env.set_flags(db.DB_TXN_NOSYNC, 0)
self.assertEqual(0, self.env.get_flags())
def test_lk_max_objects(self) :
for i in [1000, 2000, 3000] :
self.env.set_lk_max_objects(i)
self.assertEqual(i, self.env.get_lk_max_objects())
def test_lk_max_locks(self) :
for i in [1000, 2000, 3000] :
self.env.set_lk_max_locks(i)
self.assertEqual(i, self.env.get_lk_max_locks())
def test_lk_max_lockers(self) :
for i in [1000, 2000, 3000] :
self.env.set_lk_max_lockers(i)
self.assertEqual(i, self.env.get_lk_max_lockers())
def test_lg_regionmax(self) :
for i in [128, 256, 1000] :
i = i*1024*1024
self.env.set_lg_regionmax(i)
j = self.env.get_lg_regionmax()
self.assertTrue(i <= j)
self.assertTrue(2*i > j)
def test_lk_detect(self) :
flags= [db.DB_LOCK_DEFAULT, db.DB_LOCK_EXPIRE, db.DB_LOCK_MAXLOCKS,
db.DB_LOCK_MINLOCKS, db.DB_LOCK_MINWRITE,
db.DB_LOCK_OLDEST, db.DB_LOCK_RANDOM, db.DB_LOCK_YOUNGEST]
flags.append(db.DB_LOCK_MAXWRITE)
for i in flags :
self.env.set_lk_detect(i)
self.assertEqual(i, self.env.get_lk_detect())
def test_lg_dir(self) :
for i in ["a", "bb", "ccc", "dddd"] :
self.env.set_lg_dir(i)
self.assertEqual(i, self.env.get_lg_dir())
def test_lg_bsize(self) :
log_size = 70*1024
self.env.set_lg_bsize(log_size)
self.assertTrue(self.env.get_lg_bsize() >= log_size)
self.assertTrue(self.env.get_lg_bsize() < 4*log_size)
self.env.set_lg_bsize(4*log_size)
self.assertTrue(self.env.get_lg_bsize() >= 4*log_size)
def test_setget_data_dirs(self) :
dirs = ("a", "b", "c", "d")
for i in dirs :
self.env.set_data_dir(i)
self.assertEqual(dirs, self.env.get_data_dirs())
def test_setget_cachesize(self) :
cachesize = (0, 512*1024*1024, 3)
self.env.set_cachesize(*cachesize)
self.assertEqual(cachesize, self.env.get_cachesize())
cachesize = (0, 1*1024*1024, 5)
self.env.set_cachesize(*cachesize)
cachesize2 = self.env.get_cachesize()
self.assertEqual(cachesize[0], cachesize2[0])
self.assertEqual(cachesize[2], cachesize2[2])
        # Berkeley DB expands the cache by 25% to account for overhead
        # when the cache is small.
self.assertEqual(125, int(100.0*cachesize2[1]/cachesize[1]))
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
cachesize = (0, 2*1024*1024, 1)
self.assertRaises(db.DBInvalidArgError,
self.env.set_cachesize, *cachesize)
cachesize3 = self.env.get_cachesize()
self.assertEqual(cachesize2[0], cachesize3[0])
self.assertEqual(cachesize2[2], cachesize3[2])
# In Berkeley DB 5.1, the cachesize can change when opening the Env
self.assertTrue(cachesize2[1] <= cachesize3[1])
def test_set_cachesize_dbenv_db(self) :
# You can not configure the cachesize using
# the database handle, if you are using an environment.
d = db.DB(self.env)
self.assertRaises(db.DBInvalidArgError,
d.set_cachesize, 0, 1024*1024, 1)
def test_setget_shm_key(self) :
shm_key=137
self.env.set_shm_key(shm_key)
self.assertEqual(shm_key, self.env.get_shm_key())
self.env.set_shm_key(shm_key+1)
self.assertEqual(shm_key+1, self.env.get_shm_key())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
# If we try to reconfigure cache after opening the
# environment, core dump.
self.assertRaises(db.DBInvalidArgError,
self.env.set_shm_key, shm_key)
self.assertEqual(shm_key+1, self.env.get_shm_key())
if db.version() >= (4, 4) :
def test_mutex_setget_max(self) :
v = self.env.mutex_get_max()
v2 = v*2+1
self.env.mutex_set_max(v2)
self.assertEqual(v2, self.env.mutex_get_max())
self.env.mutex_set_max(v)
self.assertEqual(v, self.env.mutex_get_max())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_max, v2)
def test_mutex_setget_increment(self) :
v = self.env.mutex_get_increment()
v2 = 127
self.env.mutex_set_increment(v2)
self.assertEqual(v2, self.env.mutex_get_increment())
self.env.mutex_set_increment(v)
self.assertEqual(v, self.env.mutex_get_increment())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_increment, v2)
def test_mutex_setget_tas_spins(self) :
self.env.mutex_set_tas_spins(0) # Default = BDB decides
v = self.env.mutex_get_tas_spins()
v2 = v*2+1
self.env.mutex_set_tas_spins(v2)
self.assertEqual(v2, self.env.mutex_get_tas_spins())
self.env.mutex_set_tas_spins(v)
self.assertEqual(v, self.env.mutex_get_tas_spins())
# In this case, you can change configuration
# after opening the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.env.mutex_set_tas_spins(v2)
def test_mutex_setget_align(self) :
v = self.env.mutex_get_align()
v2 = 64
if v == 64 :
v2 = 128
self.env.mutex_set_align(v2)
self.assertEqual(v2, self.env.mutex_get_align())
# Requires a nonzero power of two
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_align, 0)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_align, 17)
self.env.mutex_set_align(2*v2)
self.assertEqual(2*v2, self.env.mutex_get_align())
# You can not change configuration after opening
# the environment.
self.env.open(self.homeDir, db.DB_CREATE)
self.assertRaises(db.DBInvalidArgError,
self.env.mutex_set_align, v2)
class DBEnv_log(DBEnv) :
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG)
def test_log_file(self) :
log_file = self.env.log_file((1, 1))
self.assertEqual("log.0000000001", log_file[-14:])
if db.version() >= (4, 4) :
# The version with transactions is checked in other test object
def test_log_printf(self) :
msg = "This is a test..."
self.env.log_printf(msg)
logc = self.env.log_cursor()
self.assertTrue(msg in (logc.last()[1]))
if db.version() >= (4, 7) :
def test_log_config(self) :
self.env.log_set_config(db.DB_LOG_DSYNC | db.DB_LOG_ZERO, 1)
self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC))
self.assertTrue(self.env.log_get_config(db.DB_LOG_ZERO))
self.env.log_set_config(db.DB_LOG_ZERO, 0)
self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC))
self.assertFalse(self.env.log_get_config(db.DB_LOG_ZERO))
class DBEnv_log_txn(DBEnv) :
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_INIT_TXN)
if (db.version() >= (4, 5)) and (db.version() < (5, 2)) :
def test_tx_max(self) :
txns=[]
def tx() :
for i in xrange(self.env.get_tx_max()) :
txns.append(self.env.txn_begin())
tx()
self.assertRaises(MemoryError, tx)
# Abort the transactions before garbage collection,
# to avoid "warnings".
for i in txns :
i.abort()
if db.version() >= (4, 4) :
# The version without transactions is checked in other test object
def test_log_printf(self) :
msg = "This is a test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
txn.commit()
logc = self.env.log_cursor()
logc.last() # Skip the commit
self.assertTrue(msg in (logc.prev()[1]))
msg = "This is another test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
txn.abort() # Do not store the new message
logc.last() # Skip the abort
self.assertTrue(msg not in (logc.prev()[1]))
msg = "This is a third test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
        txn.commit() # Store the new message
logc.last() # Skip the commit
self.assertTrue(msg in (logc.prev()[1]))
class DBEnv_memp(DBEnv):
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG)
self.db = db.DB(self.env)
self.db.open("test", db.DB_HASH, db.DB_CREATE, 0660)
def tearDown(self):
self.db.close()
del self.db
DBEnv.tearDown(self)
def test_memp_1_trickle(self) :
self.db.put("hi", "bye")
self.assertTrue(self.env.memp_trickle(100) > 0)
# Preserve the order, do "memp_trickle" test first
def test_memp_2_sync(self) :
self.db.put("hi", "bye")
self.env.memp_sync() # Full flush
# Nothing to do...
self.assertTrue(self.env.memp_trickle(100) == 0)
self.db.put("hi", "bye2")
self.env.memp_sync((1, 0)) # NOP, probably
# Something to do... or not
self.assertTrue(self.env.memp_trickle(100) >= 0)
self.db.put("hi", "bye3")
self.env.memp_sync((123, 99)) # Full flush
# Nothing to do...
self.assertTrue(self.env.memp_trickle(100) == 0)
def test_memp_stat_1(self) :
stats = self.env.memp_stat() # No param
self.assertTrue(len(stats)==2)
self.assertTrue("cache_miss" in stats[0])
stats = self.env.memp_stat(db.DB_STAT_CLEAR) # Positional param
self.assertTrue("cache_miss" in stats[0])
stats = self.env.memp_stat(flags=0) # Keyword param
self.assertTrue("cache_miss" in stats[0])
def test_memp_stat_2(self) :
stats=self.env.memp_stat()[1]
        self.assertTrue(len(stats) == 1)
self.assertTrue("test" in stats)
self.assertTrue("page_in" in stats["test"])
class DBEnv_logcursor(DBEnv):
def setUp(self):
DBEnv.setUp(self)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_INIT_TXN)
txn = self.env.txn_begin()
self.db = db.DB(self.env)
self.db.open("test", db.DB_HASH, db.DB_CREATE, 0660, txn=txn)
txn.commit()
for i in ["2", "8", "20"] :
txn = self.env.txn_begin()
self.db.put(key = i, data = i*int(i), txn=txn)
txn.commit()
def tearDown(self):
self.db.close()
del self.db
DBEnv.tearDown(self)
def _check_return(self, value) :
self.assertTrue(isinstance(value, tuple))
self.assertEqual(len(value), 2)
self.assertTrue(isinstance(value[0], tuple))
self.assertEqual(len(value[0]), 2)
self.assertTrue(isinstance(value[0][0], int))
self.assertTrue(isinstance(value[0][1], int))
self.assertTrue(isinstance(value[1], str))
# Preserve test order
def test_1_first(self) :
logc = self.env.log_cursor()
v = logc.first()
self._check_return(v)
self.assertTrue((1, 1) < v[0])
self.assertTrue(len(v[1])>0)
def test_2_last(self) :
logc = self.env.log_cursor()
lsn_first = logc.first()[0]
v = logc.last()
self._check_return(v)
self.assertTrue(lsn_first < v[0])
def test_3_next(self) :
logc = self.env.log_cursor()
lsn_last = logc.last()[0]
self.assertEqual(logc.next(), None)
lsn_first = logc.first()[0]
v = logc.next()
self._check_return(v)
self.assertTrue(lsn_first < v[0])
self.assertTrue(lsn_last > v[0])
v2 = logc.next()
self.assertTrue(v2[0] > v[0])
self.assertTrue(lsn_last > v2[0])
v3 = logc.next()
self.assertTrue(v3[0] > v2[0])
self.assertTrue(lsn_last > v3[0])
def test_4_prev(self) :
logc = self.env.log_cursor()
lsn_first = logc.first()[0]
self.assertEqual(logc.prev(), None)
lsn_last = logc.last()[0]
v = logc.prev()
self._check_return(v)
self.assertTrue(lsn_first < v[0])
self.assertTrue(lsn_last > v[0])
v2 = logc.prev()
self.assertTrue(v2[0] < v[0])
self.assertTrue(lsn_first < v2[0])
v3 = logc.prev()
self.assertTrue(v3[0] < v2[0])
self.assertTrue(lsn_first < v3[0])
def test_5_current(self) :
logc = self.env.log_cursor()
logc.first()
v = logc.next()
self.assertEqual(v, logc.current())
def test_6_set(self) :
logc = self.env.log_cursor()
logc.first()
v = logc.next()
self.assertNotEqual(v, logc.next())
self.assertNotEqual(v, logc.next())
self.assertEqual(v, logc.set(v[0]))
def test_explicit_close(self) :
logc = self.env.log_cursor()
logc.close()
self.assertRaises(db.DBCursorClosedError, logc.next)
def test_implicit_close(self) :
logc = [self.env.log_cursor() for i in xrange(10)]
        self.env.close() # This close should also close all cursors derived from it
for i in logc :
self.assertRaises(db.DBCursorClosedError, i.next)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBEnv_general))
suite.addTest(unittest.makeSuite(DBEnv_memp))
suite.addTest(unittest.makeSuite(DBEnv_logcursor))
suite.addTest(unittest.makeSuite(DBEnv_log))
suite.addTest(unittest.makeSuite(DBEnv_log_txn))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| gpl-3.0 |
rwarren14/robotframework | src/robot/writer/filewriters.py | 22 | 5515 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import csv
except ImportError:
# csv module is missing from IronPython < 2.7.1
csv = None
from robot import utils
from .formatters import TsvFormatter, TxtFormatter, PipeFormatter
from .htmlformatter import HtmlFormatter
from .htmltemplate import TEMPLATE_START, TEMPLATE_END
def FileWriter(context):
"""Creates and returns a FileWriter object.
:param context: Type of returned FileWriter is determined based on
`context.format`. `context` is also passed to created writer.
:type context: :py:class:`WritingContext`
"""
if context.format == context.html_format:
return HtmlFileWriter(context)
if context.format == context.tsv_format:
return TsvFileWriter(context)
if context.pipe_separated:
return PipeSeparatedTxtWriter(context)
return SpaceSeparatedTxtWriter(context)
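# Rough usage sketch (the WritingContext arguments shown are assumptions for
# illustration; the real call site constructs the context in
# robot.writer.datafilewriter):
#
#   context = WritingContext(datafile, format='txt', output=output_stream)
#   FileWriter(context).write(datafile)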
class _DataFileWriter(object):
def __init__(self, formatter, configuration):
self._formatter = formatter
self._output = configuration.output
self._line_separator = configuration.line_separator
self._encoding = configuration.encoding
def write(self, datafile):
tables = [table for table in datafile if table]
for table in tables:
self._write_table(table, is_last=table is tables[-1])
def _write_table(self, table, is_last):
self._write_header(table)
self._write_rows(self._formatter.format_table(table))
if not is_last:
self._write_empty_row(table)
def _write_header(self, table):
self._write_row(self._formatter.format_header(table))
def _write_rows(self, rows):
for row in rows:
self._write_row(row)
def _write_empty_row(self, table):
self._write_row(self._formatter.empty_row_after(table))
def _encode(self, row):
return row.encode(self._encoding)
def _write_row(self, row):
raise NotImplementedError
class SpaceSeparatedTxtWriter(_DataFileWriter):
def __init__(self, configuration):
formatter = TxtFormatter(configuration.txt_column_count)
self._separator = ' ' * configuration.txt_separating_spaces
_DataFileWriter.__init__(self, formatter, configuration)
def _write_row(self, row):
line = self._separator.join(row).rstrip() + self._line_separator
self._output.write(self._encode(line))
class PipeSeparatedTxtWriter(_DataFileWriter):
_separator = ' | '
def __init__(self, configuration):
formatter = PipeFormatter(configuration.txt_column_count)
_DataFileWriter.__init__(self, formatter, configuration)
def _write_row(self, row):
row = self._separator.join(row)
if row:
row = '| ' + row + ' |'
self._output.write(self._encode(row + self._line_separator))
class TsvFileWriter(_DataFileWriter):
def __init__(self, configuration):
if not csv:
raise RuntimeError('No csv module found. '
'Writing tab separated format is not possible.')
formatter = TsvFormatter(configuration.tsv_column_count)
_DataFileWriter.__init__(self, formatter, configuration)
self._writer = self._get_writer(configuration)
def _get_writer(self, configuration):
# Custom dialect needed as a workaround for
# http://ironpython.codeplex.com/workitem/33627
dialect = csv.excel_tab()
dialect.lineterminator = configuration.line_separator
return csv.writer(configuration.output, dialect=dialect)
def _write_row(self, row):
self._writer.writerow([self._encode(c) for c in row])
class HtmlFileWriter(_DataFileWriter):
def __init__(self, configuration):
formatter = HtmlFormatter(configuration.html_column_count)
_DataFileWriter.__init__(self, formatter, configuration)
self._name = configuration.datafile.name
self._writer = utils.HtmlWriter(configuration.output,
configuration.line_separator,
encoding=self._encoding)
def write(self, datafile):
self._writer.content(TEMPLATE_START % {'NAME': self._name},
escape=False, replace_newlines=True)
_DataFileWriter.write(self, datafile)
self._writer.content(TEMPLATE_END, escape=False, replace_newlines=True)
def _write_table(self, table, is_last):
self._writer.start('table', {'id': table.type.replace(' ', ''),
'border': '1'})
_DataFileWriter._write_table(self, table, is_last)
self._writer.end('table')
def _write_row(self, row):
self._writer.start('tr')
for cell in row:
self._writer.element(cell.tag, cell.content, cell.attributes,
escape=False)
self._writer.end('tr')
| apache-2.0 |
actmd/Diamond | src/collectors/sqs/sqs.py | 56 | 2330 | # coding=utf-8
"""
The SQS collector collects metrics for one or more Amazon AWS SQS queues
#### Configuration
Below is an example configuration for the SQSCollector.
You can specify an arbitrary amount of regions
```
enabled = True
interval = 60
[regions]
[[region-code]]
access_key_id = '...'
        secret_access_key = '...'
queues = queue_name[,queue_name2[,..]]
```
Note: If you modify the SQSCollector configuration, you will need to
restart diamond.
#### Dependencies
* boto
"""
import diamond.collector
try:
from boto import sqs
except ImportError:
sqs = False
class SqsCollector(diamond.collector.Collector):
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SqsCollector, self).get_default_config()
config.update({
'path': 'sqs',
})
return config
def collect(self):
attribs = ['ApproximateNumberOfMessages',
'ApproximateNumberOfMessagesNotVisible',
'ApproximateNumberOfMessagesDelayed',
'CreatedTimestamp',
'DelaySeconds',
'LastModifiedTimestamp',
'MaximumMessageSize',
'MessageRetentionPeriod',
'ReceiveMessageWaitTimeSeconds',
'VisibilityTimeout']
if not sqs:
self.log.error("boto module not found!")
return
for (region, region_cfg) in self.config['regions'].items():
assert 'access_key_id' in region_cfg
assert 'secret_access_key' in region_cfg
assert 'queues' in region_cfg
queues = region_cfg['queues'].split(',')
for queue_name in queues:
conn = sqs.connect_to_region(
region,
aws_access_key_id=region_cfg['access_key_id'],
aws_secret_access_key=region_cfg['secret_access_key'],
)
queue = conn.get_queue(queue_name)
for attrib in attribs:
d = queue.get_attributes(attrib)
self.publish(
'%s.%s.%s' % (region, queue_name, attrib),
d[attrib]
)
| mit |
xiaoyaozi5566/DynamicCache | src/arch/x86/isa/insts/simd128/integer/logical/por.py | 91 | 2658 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop POR_XMM_XMM {
mor xmml, xmml, xmmlm
mor xmmh, xmmh, xmmhm
};
def macroop POR_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
def macroop POR_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mor xmml, xmml, ufp1
mor xmmh, xmmh, ufp2
};
'''
| bsd-3-clause |
CiscoSystems/quantum | neutron/tests/base.py | 4 | 2827 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base Test Case for all Unit Tests"""
import logging
import os
import fixtures
from oslo.config import cfg
import stubout
import testtools
from neutron.openstack.common import exception
CONF = cfg.CONF
TRUE_STRING = ['True', '1']
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
if os.environ.get('OS_DEBUG') in TRUE_STRING:
_level = logging.DEBUG
else:
_level = logging.INFO
self.useFixture(fixtures.FakeLogger(format=LOG_FORMAT, level=_level))
test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(CONF.reset)
if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_STRING:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in TRUE_STRING:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
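# Hedged usage sketch (the test class and option names below are illustrative only):
#
#   class MyPluginTestCase(BaseTestCase):
#       def test_with_overrides(self):
#           self.config(allow_overlapping_ips=True)
#           self.config(report_interval=10, group='AGENT')
#           # overrides are cleared automatically by the fixtures cleanup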
| apache-2.0 |
bodi000/odoo | addons/website_sale/models/product.py | 10 | 7529 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_attribute(osv.Model):
# TODO merge product.attribute, mrp.properties product_manufacturer_attributes
_name = "product.attribute"
_columns = {
'name': fields.char('Name', translate=True, required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values'),
}
class product_attribute_value(osv.Model):
_name = "product.attribute.value"
_columns = {
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'name': fields.char('Value', translate=True, required=True),
}
class product_attribute_line(osv.Model):
_name = "product.attribute.line"
_order = 'attribute_id, value_id'
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product', required=True),
'attribute_id': fields.many2one('product.attribute', 'attribute', required=True),
'value_id': fields.many2one('product.attribute.value', 'Textual Value'),
}
def onchange_attribute_id(self, cr, uid, ids, attribute_id, context=None):
return {'value': {'value_id': False}}
class product_style(osv.Model):
_name = "product.style"
_columns = {
'name' : fields.char('Style Name', required=True),
'html_class': fields.char('HTML Classes'),
}
class product_pricelist(osv.Model):
_inherit = "product.pricelist"
_columns = {
'code': fields.char('Promotional Code'),
}
class product_template(osv.Model):
_inherit = ["product.template", "website.seo.metadata"]
_order = 'website_published desc, website_sequence desc, name'
_name = 'product.template'
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.id,)
return res
_columns = {
'attribute_lines': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product attributes'),
# TODO FIXME tde: when website_mail/mail_thread.py inheritance work -> this field won't be necessary
'website_message_ids': fields.one2many(
'mail.message', 'res_id',
domain=lambda self: [
'&', ('model', '=', self._name), ('type', '=', 'comment')
],
string='Website Comments',
),
'website_published': fields.boolean('Available in the website'),
'website_description': fields.html('Description for the website'),
'alternative_product_ids': fields.many2many('product.template','product_alternative_rel','src_id','dest_id', string='Alternative Products', help='Appear on the product page'),
'accessory_product_ids': fields.many2many('product.template','product_accessory_rel','src_id','dest_id', string='Accessory Products', help='Appear on the shopping cart'),
'website_size_x': fields.integer('Size X'),
'website_size_y': fields.integer('Size Y'),
'website_style_ids': fields.many2many('product.style', string='Styles'),
'website_sequence': fields.integer('Sequence', help="Determine the display order in the Website E-commerce"),
'website_url': fields.function(_website_url, string="Website url", type="char"),
}
def _defaults_website_sequence(self, cr, uid, *l, **kwargs):
cr.execute('SELECT MAX(website_sequence)+1 FROM product_template')
next_sequence = cr.fetchone()[0] or 0
return next_sequence
_defaults = {
'website_size_x': 1,
'website_size_y': 1,
'website_sequence': _defaults_website_sequence,
'website_published': False,
}
def website_reorder(self, cr, uid, ids, operation=None, context=None):
if operation == "top":
cr.execute('SELECT MAX(website_sequence) FROM product_template')
seq = (cr.fetchone()[0] or 0) + 1
if operation == "bottom":
cr.execute('SELECT MIN(website_sequence) FROM product_template')
seq = (cr.fetchone()[0] or 0) -1
if operation == "up":
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence > %s AND website_published = %s ORDER BY website_sequence ASC LIMIT 1""" % (product.website_sequence, product.website_published))
prev = cr.fetchone()
if prev:
self.write(cr, uid, [prev[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': prev[1]}, context=context)
else:
return self.website_reorder(cr, uid, ids, operation='top', context=context)
if operation == "down":
product = self.browse(cr, uid, ids[0], context=context)
cr.execute(""" SELECT id, website_sequence FROM product_template
WHERE website_sequence < %s AND website_published = %s ORDER BY website_sequence DESC LIMIT 1""" % (product.website_sequence, product.website_published))
next = cr.fetchone()
if next:
self.write(cr, uid, [next[0]], {'website_sequence': product.website_sequence}, context=context)
return self.write(cr, uid, [ids[0]], {'website_sequence': next[1]}, context=context)
else:
return self.website_reorder(cr, uid, ids, operation='bottom', context=context)
return self.write(cr, uid, ids, {'website_sequence': seq}, context=context)
def img(self, cr, uid, ids, field='image_small', context=None):
return "/website/image?model=%s&field=%s&id=%s" % (self._name, field, ids[0])
class product_product(osv.Model):
_inherit = "product.product"
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = "/shop/product/%s" % (product.product_tmpl_id.id,)
return res
_columns = {
'website_url': fields.function(_website_url, string="Website url", type="char"),
}
def img(self, cr, uid, ids, field='image_small', context=None):
temp_id = self.browse(cr, uid, ids[0], context=context).product_tmpl_id.id
return "/website/image?model=product.template&field=%s&id=%s" % (field, temp_id)
# vim:et:
| agpl-3.0 |
keedio/hue | desktop/libs/librdbms/src/librdbms/design.py | 28 | 3677 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The SQLdesign class can (de)serialize a design to/from a QueryDict.
"""
import json
import logging
import django.http
from django.utils.translation import ugettext as _
from beeswax.design import normalize_form_dict, denormalize_form_dict, strip_trailing_semicolon,\
split_statements
LOG = logging.getLogger(__name__)
SERIALIZATION_VERSION = "0.0.1"
class SQLdesign(object):
"""
Represents an SQL design, with methods to perform (de)serialization.
"""
_QUERY_ATTRS = [ 'query', 'type', 'database', 'server' ]
def __init__(self, form=None, query_type=None):
"""Initialize the design from a valid form data."""
if form is not None:
self._data_dict = dict(query = normalize_form_dict(form, SQLdesign._QUERY_ATTRS))
if query_type is not None:
self._data_dict['query']['type'] = query_type
def dumps(self):
"""Returns the serialized form of the design in a string"""
dic = self._data_dict.copy()
dic['VERSION'] = SERIALIZATION_VERSION
return json.dumps(dic)
@property
def sql_query(self):
return self._data_dict['query']['query']
@property
def query(self):
return self._data_dict['query'].copy()
@property
def server(self):
return self._data_dict['query']['server']
@property
def database(self):
return self._data_dict['query']['database']
def get_query_dict(self):
# We construct the mform to use its structure and prefix. We don't actually bind data to the forms.
from beeswax.forms import QueryForm
mform = QueryForm()
mform.bind()
res = django.http.QueryDict('', mutable=True)
res.update(denormalize_form_dict(
self._data_dict['query'], mform.query, SQLdesign._QUERY_ATTRS))
return res
def get_query(self):
return self._data_dict["query"]
@property
def statement_count(self):
return len(self.statements)
def get_query_statement(self, n=0):
return self.statements[n]
@property
def statements(self):
sql_query = strip_trailing_semicolon(self.sql_query)
return [strip_trailing_semicolon(statement.strip()) for statement in split_statements(sql_query)]
@staticmethod
def loads(data):
"""Returns SQLdesign from the serialized form"""
dic = json.loads(data)
dic = dict(map(lambda k: (str(k), dic.get(k)), dic.keys()))
if dic['VERSION'] != SERIALIZATION_VERSION:
      LOG.error('Design version mismatch. Found %s; expected %s' % (dic['VERSION'], SERIALIZATION_VERSION))
# Convert to latest version
del dic['VERSION']
if 'type' not in dic['query'] or dic['query']['type'] is None:
dic['query']['type'] = 0
if 'server' not in dic['query']:
raise RuntimeError(_('No server!'))
if 'database' not in dic['query']:
raise RuntimeError(_('No database!'))
design = SQLdesign()
design._data_dict = dic
return design | apache-2.0 |
joelthompson/ansible-modules-core | network/basics/uri.py | 5 | 16891 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Romeo Theriault <romeot () hawaii.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# see examples/playbooks/uri.yml
import cgi
import shutil
import tempfile
import datetime
try:
import json
except ImportError:
import simplejson as json
DOCUMENTATION = '''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
required: true
default: null
dest:
description:
- path of where to download the file to (if desired). If I(dest) is a
directory, the basename of the file on the remote server will be used.
required: false
default: null
user:
description:
- username for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
password:
description:
- password for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
body:
description:
- The body of the http request/response to the web service. If C(body_format) is set
        to 'json' it will take an already formatted JSON string or convert a data structure
into JSON.
required: false
default: null
body_format:
description:
- The serialization format of the body. When set to json, encodes the
body argument, if needed, and automatically sets the Content-Type header accordingly.
required: false
choices: [ "raw", "json" ]
default: raw
version_added: "2.0"
method:
description:
- The HTTP method of the request or response. It MUST be uppercase.
required: false
choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ]
default: "GET"
return_content:
description:
- Whether or not to return the body of the request as a "content" key in
the dictionary result. If the reported Content-type is
"application/json", then the JSON is additionally loaded into a key
called C(json) in the dictionary results.
required: false
choices: [ "yes", "no" ]
default: "no"
force_basic_auth:
description:
- The library used by the uri module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
required: false
choices: [ "all", "safe", "none" ]
default: "safe"
creates:
description:
- a filename, when it already exists, this step will not be run.
required: false
removes:
description:
- a filename, when it does not exist, this step will not be run.
required: false
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the
        request. Can also be a comma-separated list of status codes.
required: false
default: 200
timeout:
description:
- The socket level timeout in seconds
required: false
default: 30
HEADER_:
description:
      - Any parameter starting with "HEADER_" is sent with your request as a header.
For example, HEADER_Content-Type="application/json" would send the header
"Content-Type" along with your request with a value of "application/json".
This option is deprecated as of C(2.1) and may be removed in a future
release. Use I(headers) instead.
required: false
default: null
headers:
description:
- Add custom HTTP headers to a request in the format of a YAML hash
required: false
default: null
version_added: '2.1'
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
validate_certs:
description:
      - If C(no), SSL certificates will not be validated. This should only be
        set to C(no) on personally controlled sites using self-signed
        certificates. Prior to 1.9.2 the code defaulted to C(no).
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.9.2'
notes:
- The dependency on httplib2 was removed in Ansible 2.1
author: "Romeo Theriault (@romeotheriault)"
'''
EXAMPLES = '''
# Check that you can connect (GET) to a page and it returns a status 200
- uri: url=http://www.example.com
# Check that a page returns a status 200 and fail if the word AWESOME is not
# in the page contents.
- action: uri url=http://www.example.com return_content=yes
register: webpage
- action: fail
when: "'AWESOME' not in webpage.content"
# Create a JIRA issue
- uri:
url: https://your.jira.example.com/rest/api/2/issue/
method: POST
user: your_username
password: your_pass
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
# Login to a form based webpage, then use the returned cookie to
# access the app in later tasks
- uri:
url: https://your.form.based.auth.example.com/index.php
method: POST
body: "name=your_username&password=your_password&enter=Sign%20in"
status_code: 302
HEADER_Content-Type: "application/x-www-form-urlencoded"
register: login
- uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
HEADER_Cookie: "{{login.set_cookie}}"
# Queue build of a project in Jenkins:
- uri:
url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}"
method: GET
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
force_basic_auth: yes
status_code: 201
'''
def write_file(module, url, dest, content):
# create a tempfile with some test content
fd, tmpsrc = tempfile.mkstemp()
f = open(tmpsrc, 'wb')
try:
f.write(content)
except Exception, err:
os.remove(tmpsrc)
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="Source %s does not exist" % (tmpsrc))
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc))
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s not writable" % (dest))
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination %s not readable" % (dest))
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception, err:
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
os.remove(tmpsrc)
def url_filename(url):
fn = os.path.basename(urlparse.urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
def absolute_location(url, location):
"""Attempts to create an absolute URL based on initial URL, and
next URL, specifically in the case of a ``Location`` header.
"""
if '://' in location:
return location
elif location.startswith('/'):
parts = urlparse.urlsplit(url)
base = url.replace(parts[2], '')
return '%s%s' % (base, location)
elif not location.startswith('/'):
base = os.path.dirname(url)
return '%s/%s' % (base, location)
else:
return location
def uri(module, url, dest, body, body_format, method, headers, socket_timeout):
    # if dest is set and is a directory, let's check if we get redirected and
# set the filename from that url
redirected = False
redir_info = {}
r = {}
if dest is not None:
# Stash follow_redirects, in this block we don't want to follow
# we'll reset back to the supplied value soon
follow_redirects = module.params['follow_redirects']
module.params['follow_redirects'] = False
dest = os.path.expanduser(dest)
if os.path.isdir(dest):
# first check if we are redirected to a file download
_, redir_info = fetch_url(module, url, data=body,
headers=headers,
method=method,
timeout=socket_timeout)
# if we are redirected, update the url with the location header,
# and update dest with the new url filename
if redir_info['status'] in (301, 302, 303, 307):
url = redir_info['location']
redirected = True
dest = os.path.join(dest, url_filename(url))
        # if the destination file already exists, only download if the file is newer
if os.path.exists(dest):
t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000')
headers['If-Modified-Since'] = tstamp
# Reset follow_redirects back to the stashed value
module.params['follow_redirects'] = follow_redirects
resp, info = fetch_url(module, url, data=body, headers=headers,
method=method, timeout=socket_timeout)
r['redirected'] = redirected or info['url'] != url
r.update(redir_info)
r.update(info)
try:
content = resp.read()
except AttributeError:
content = ''
return r, content, dest
def main():
argument_spec = url_argument_spec()
argument_spec.update(dict(
dest = dict(required=False, default=None, type='path'),
url_username = dict(required=False, default=None, aliases=['user']),
url_password = dict(required=False, default=None, aliases=['password']),
body = dict(required=False, default=None, type='raw'),
body_format = dict(required=False, default='raw', choices=['raw', 'json']),
method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']),
return_content = dict(required=False, default='no', type='bool'),
follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
creates = dict(required=False, default=None, type='path'),
removes = dict(required=False, default=None, type='path'),
status_code = dict(required=False, default=[200], type='list'),
timeout = dict(required=False, default=30, type='int'),
headers = dict(required=False, type='dict', default={})
))
module = AnsibleModule(
argument_spec=argument_spec,
check_invalid_arguments=False,
add_file_common_args=True
)
url = module.params['url']
body = module.params['body']
body_format = module.params['body_format'].lower()
method = module.params['method']
dest = module.params['dest']
return_content = module.params['return_content']
creates = module.params['creates']
removes = module.params['removes']
status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
dict_headers = module.params['headers']
if body_format == 'json':
        # Encode the body unless it's a string, then assume it is preformatted JSON
if not isinstance(body, basestring):
body = json.dumps(body)
dict_headers['Content-Type'] = 'application/json'
# Grab all the http headers. Need this hack since passing multi-values is
# currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
for key, value in module.params.iteritems():
if key.startswith("HEADER_"):
skey = key.replace("HEADER_", "")
dict_headers[skey] = value
if creates is not None:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of uri executions.
if os.path.exists(creates):
module.exit_json(stdout="skipped, since %s exists" % creates,
changed=False, stderr=False, rc=0)
if removes is not None:
# do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
# of uri executions.
if not os.path.exists(removes):
module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0)
# Make the request
resp, content, dest = uri(module, url, dest, body, body_format, method,
dict_headers, socket_timeout)
resp['status'] = int(resp['status'])
# Write the file out if requested
if dest is not None:
if resp['status'] == 304:
changed = False
else:
write_file(module, url, dest, content)
# allow file attribute changes
changed = True
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
changed = module.set_fs_attributes_if_different(file_args, changed)
resp['path'] = dest
else:
changed = False
    # Transmogrify the headers, replacing '-' with '_', since variables don't
# work with dashes.
uresp = {}
for key, value in resp.iteritems():
ukey = key.replace("-", "_")
uresp[ukey] = value
try:
uresp['location'] = absolute_location(url, uresp['location'])
except KeyError:
pass
# Default content_encoding to try
content_encoding = 'utf-8'
if 'content_type' in uresp:
content_type, params = cgi.parse_header(uresp['content_type'])
if 'charset' in params:
content_encoding = params['charset']
u_content = unicode(content, content_encoding, errors='replace')
if content_type.startswith('application/json') or \
content_type.startswith('text/json'):
try:
js = json.loads(u_content)
uresp['json'] = js
except:
pass
else:
u_content = unicode(content, content_encoding, errors='replace')
if resp['status'] not in status_code:
uresp['msg'] = 'Status code was not %s: %s' % (status_code, uresp.get('msg', ''))
module.fail_json(content=u_content, **uresp)
elif return_content:
module.exit_json(changed=changed, content=u_content, **uresp)
else:
module.exit_json(changed=changed, **uresp)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
petrushev/txplaya | txplayagui/librarywidget.py | 1 | 4653 | import json
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QModelIndex, Qt
from PyQt5.QtWidgets import QWidget, QSpacerItem, QSizePolicy, QShortcut
from PyQt5.QtGui import QKeySequence
from txplayagui.ui.library import Ui_LibraryWidget
from txplayagui.library import LibraryModel
from txplayagui.utilities import unwrapMime
class LibraryWidget(Ui_LibraryWidget, QWidget):
rescanStarted = pyqtSignal()
itemsActivated = pyqtSignal(list)
def __init__(self, parent=None):
QWidget.__init__(self, parent)
Ui_LibraryWidget.setupUi(self, self)
self.rescanButton.show()
self.scanProgressBar.hide()
self.libraryModel = LibraryModel()
self.treeView.setModel(self.libraryModel)
self.libraryModel.toggleRow.connect(self.onToggleRow)
self.rescanButton.clicked.connect(self.rescanClicked)
self.treeView.doubleClicked.connect(self.onTreeViewDoubleClicked)
self.querySearchBox.textChanged.connect(self.onQueryChanged)
self.clearSearchButton.clicked.connect(self.onQueryClear)
# shortcuts
releaseSearchboxShortcut = QShortcut(QKeySequence('Esc'), self.querySearchBox)
releaseSearchboxShortcut.setContext(Qt.WidgetShortcut)
releaseSearchboxShortcut.activated.connect(self.onReleaseSearchbox)
scrollLibraryShortcut = QShortcut(QKeySequence(Qt.Key_Down), self.querySearchBox)
scrollLibraryShortcut.setContext(Qt.WidgetShortcut)
scrollLibraryShortcut.activated.connect(self.onScrollLibrary)
activateTracksShortcut = QShortcut(QKeySequence(Qt.Key_Return), self.treeView)
activateTracksShortcut.setContext(Qt.WidgetShortcut)
activateTracksShortcut.activated.connect(self.onActivateTracks)
@pyqtSlot()
def rescanClicked(self):
from txplayagui.client import rescanLibrary
self.rescanButton.hide()
self.scanControlsLayout.removeItem(self.scanControlsLayout.itemAt(2))
self.scanProgressBar.show()
self.scanResponse = rescanLibrary()
self.scanResponse.lineReceived.connect(self.scanProgress)
self.rescanStarted.emit()
@pyqtSlot(str)
def scanProgress(self, progress):
data = json.loads(progress.rstrip())
if 'scanprogress' in data:
progress = data['scanprogress']
self.setProgress(progress)
else:
self.scanResponse.close()
self.scanResponse.deleteLater()
self.rescanFinished(data['library'])
@pyqtSlot(int, QModelIndex, bool)
def onToggleRow(self, row, parentIndex, isShown):
self.treeView.setRowHidden(row, parentIndex, not isShown)
@pyqtSlot(QModelIndex)
def onTreeViewDoubleClicked(self, index):
hashes = self._getHashes(index)
if len(hashes) == 0:
return
self.itemsActivated.emit(hashes)
@pyqtSlot(unicode)
def onQueryChanged(self, query):
if len(query) > 2:
self.libraryModel.filter(query)
elif query == '':
return self.libraryModel.showAll()
@pyqtSlot()
def onQueryClear(self):
self.querySearchBox.setText('')
self.querySearchBox.setFocus()
def onReleaseSearchbox(self):
self.setFocus()
def onScrollLibrary(self):
self.treeView.setCurrentIndex(self.libraryModel.headIndex())
self.treeView.setFocus()
def onActivateTracks(self):
collectedHashes = []
for index in self.treeView.selectedIndexes():
for hash_ in self._getHashes(index):
if hash_ not in collectedHashes:
collectedHashes.append(hash_)
if len(collectedHashes) == 0:
return
self.itemsActivated.emit(collectedHashes)
def _getHashes(self, index):
mimeData = unwrapMime(self.libraryModel.mimeData([index]))
item = mimeData['items'][0]
try:
return [item['hash']]
except KeyError:
if 'album' in item:
return self.libraryModel.albumHashes(index)
return []
def setProgress(self, value):
self.scanProgressBar.setValue(value)
def rescanFinished(self, data):
self.libraryModel.loadData(data)
self.rescanButton.show()
spacerItem = QSpacerItem(0, 0, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.scanControlsLayout.addItem(spacerItem)
self.scanProgressBar.hide()
self.scanProgressBar.setValue(0)
# apply filter if active
query = self.querySearchBox.text().lower()
if len(query) > 2:
self.libraryModel.filter(query)
| gpl-3.0 |
hyperNURb/ggrc-core | src/ggrc_risks/__init__.py | 2 | 2680 | # Copyright (C) 2015 Reciprocity, Inc - All Rights Reserved
# Unauthorized use, copying, distribution, displaying, or public performance
# of this file, via any medium, is strictly prohibited. All information
# contained herein is proprietary and confidential and may not be shared
# with any third party without the express written consent of Reciprocity, Inc.
# Created By: [email protected]
# Maintained By: [email protected]
from flask import Blueprint
from ggrc.services.registry import service
import ggrc_risks.models as models
from ggrc_basic_permissions.contributed_roles import RoleContributions
from ggrc_risks.converters import IMPORTABLE
from ggrc.models import all_models
import ggrc_risks.views
# Initialize signal handler for status changes
from blinker import Namespace
signals = Namespace()
status_change = signals.signal(
'Status Changed',
"""
This is used to signal any listeners of any changes in model object status
attribute
""")
# Initialize Flask Blueprint for extension
blueprint = Blueprint(
'ggrc_risks',
__name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/ggrc_risks',
)
_risk_object_types = [
"Program",
"Regulation", "Standard", "Policy", "Contract",
"Objective", "Control", "Section", "Clause",
"System", "Process",
"DataAsset", "Facility", "Market", "Product", "Project"
]
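# Dynamically mix the Riskable behaviour into each of the models listed above
# by prepending it to the class bases, then call its late initialisation hook.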
for type_ in _risk_object_types:
model = getattr(all_models, type_)
model.__bases__ = (
models.risk_object.Riskable,
) + model.__bases__
model.late_init_riskable()
def get_public_config(current_user):
"""Expose additional permissions-dependent config to client.
"""
return {}
def contributed_services():
return [
service('risks', models.Risk),
service('risk_objects', models.RiskObject),
service('threat_actors', models.ThreatActor),
]
def contributed_object_views():
from . import models
from ggrc.views.registry import object_view
return [
object_view(models.Risk),
object_view(models.ThreatActor),
]
# Initialize non-RESTful views
def init_extra_views(app):
ggrc_risks.views.init_extra_views(app)
class RiskRoleContributions(RoleContributions):
contributions = {
'Creator': {
'read': [],
'create': ['ThreatActor', 'Risk'],
},
'Editor': {
'read': ['ThreatActor', 'Risk'],
'create': ['ThreatActor', 'Risk'],
},
'Reader': {
'read': ['ThreatActor', 'Risk'],
'create': ['ThreatActor', 'Risk'],
},
}
ROLE_CONTRIBUTIONS = RiskRoleContributions()
contributed_importables = IMPORTABLE
| apache-2.0 |
MER-GROUP/intellij-community | python/lib/Lib/distutils/dep_util.py | 85 | 3570 | """distutils.dep_util
Utility functions for simple, timestamp-based dependency of files
and groups of files; also, function based entirely on such
timestamp dependency analysis."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: dep_util.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.errors import DistutilsFileError
def newer (source, target):
"""Return true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't. Return false if
both exist and 'target' is the same age or younger than 'source'.
Raise DistutilsFileError if 'source' does not exist.
"""
if not os.path.exists(source):
raise DistutilsFileError, "file '%s' does not exist" % source
if not os.path.exists(target):
return 1
from stat import ST_MTIME
mtime1 = os.stat(source)[ST_MTIME]
mtime2 = os.stat(target)[ST_MTIME]
return mtime1 > mtime2
# newer ()
def newer_pairwise (sources, targets):
"""Walk two filename lists in parallel, testing if each source is newer
than its corresponding target. Return a pair of lists (sources,
targets) where source is newer than target, according to the semantics
of 'newer()'.
"""
if len(sources) != len(targets):
raise ValueError, "'sources' and 'targets' must be same length"
# build a pair of lists (sources, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources)):
if newer(sources[i], targets[i]):
n_sources.append(sources[i])
n_targets.append(targets[i])
return (n_sources, n_targets)
# newer_pairwise ()
def newer_group (sources, target, missing='error'):
"""Return true if 'target' is out-of-date with respect to any file
listed in 'sources'. In other words, if 'target' exists and is newer
than every file in 'sources', return false; otherwise return true.
'missing' controls what we do when a source file is missing; the
default ("error") is to blow up with an OSError from inside 'stat()';
if it is "ignore", we silently drop any missing source files; if it is
"newer", any missing source files make us assume that 'target' is
out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
carry out commands that wouldn't work because inputs are missing, but
that doesn't matter because you're not actually going to run the
commands).
"""
# If the target doesn't even exist, then it's definitely out-of-date.
if not os.path.exists(target):
return 1
# Otherwise we have to find out the hard way: if *any* source file
# is more recent than 'target', then 'target' is out-of-date and
# we can immediately return true. If we fall through to the end
# of the loop, then 'target' is up-to-date and we return false.
from stat import ST_MTIME
target_mtime = os.stat(target)[ST_MTIME]
for source in sources:
if not os.path.exists(source):
if missing == 'error': # blow up when we stat() the file
pass
elif missing == 'ignore': # missing source dropped from
continue # target's dependency list
elif missing == 'newer': # missing source means target is
return 1 # out-of-date
source_mtime = os.stat(source)[ST_MTIME]
if source_mtime > target_mtime:
return 1
else:
return 0
# newer_group ()
| apache-2.0 |
bbbbanjax/CloudBot | plugins/remind.py | 2 | 5853 | """
remind.py
Allows users to add reminders for various tasks.
Created By:
- Pangea <https://github.com/PangeaCake>
- Luke Rogers <https://github.com/lukeroge>
License: GPL v3
"""
from datetime import datetime
import time
import asyncio
from sqlalchemy import Table, Column, String, DateTime, PrimaryKeyConstraint
from cloudbot import hook
from cloudbot.util import botvars
from cloudbot.util.timeparse import time_parse
from cloudbot.util.timeformat import format_time, time_since
from cloudbot.util import colors
table = Table(
'reminders',
botvars.metadata,
Column('network', String(50)),
Column('added_user', String(30)),
Column('added_time', DateTime),
Column('added_chan', String(50)),
Column('message', String(512)),
Column('remind_time', DateTime),
PrimaryKeyConstraint('network', 'added_user', 'added_time')
)
@asyncio.coroutine
def delete_reminder(async, db, network, remind_time, user):
query = table.delete() \
.where(table.c.network == network.lower()) \
.where(table.c.remind_time == remind_time) \
.where(table.c.added_user == user.lower())
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
def delete_all(async, db, network, user):
query = table.delete() \
.where(table.c.network == network.lower()) \
.where(table.c.added_user == user.lower())
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
def add_reminder(async, db, network, added_user, added_chan, message, remind_time, added_time):
query = table.insert().values(
network=network.lower(),
added_user=added_user.lower(),
added_time=added_time,
added_chan=added_chan.lower(),
message=message,
remind_time=remind_time
)
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
@hook.on_start()
def load_cache(async, db):
global reminder_cache
reminder_cache = []
for network, remind_time, added_time, user, message in (yield from async(_load_cache_db, db)):
reminder_cache.append((network, remind_time, added_time, user, message))
def _load_cache_db(db):
query = db.execute(table.select())
return [(row["network"], row["remind_time"], row["added_time"], row["added_user"], row["message"]) for row in query]
@asyncio.coroutine
@hook.periodic(30, initial_interval=30)
def check_reminders(bot, async, db):
current_time = datetime.now()
for reminder in reminder_cache:
network, remind_time, added_time, user, message = reminder
if remind_time <= current_time:
if network not in bot.connections:
# connection is invalid
                yield from delete_reminder(async, db, network, remind_time, user)
yield from load_cache(async, db)
continue
conn = bot.connections[network]
if not conn.ready:
return
remind_text = colors.parse(time_since(added_time, count=2))
alert = colors.parse("{}, you have a reminder from $(b){}$(clear) ago!".format(user, remind_text))
conn.message(user, alert)
conn.message(user, '"{}"'.format(message))
            delta = (current_time - remind_time).total_seconds()
if delta > (30*60):
late_time = time_since(remind_time, count=2)
late = "(I'm sorry for delivering this message $(b){}$(clear) late," \
" it seems I was unable to deliver it on time)".format(late_time)
conn.message(user, colors.parse(late))
yield from delete_reminder(async, db, network, remind_time, user)
yield from load_cache(async, db)
@asyncio.coroutine
@hook.command('remind', 'reminder')
def remind(text, nick, chan, db, conn, notice, async):
"""<1 minute, 30 seconds>: <do task> -- reminds you to <do task> in <1 minute, 30 seconds>"""
count = len([x for x in reminder_cache if x[0] == conn.name and x[3] == nick.lower()])
if text == "clear":
if count == 0:
return "You have no reminders to delete."
yield from delete_all(async, db, conn.name, nick)
yield from load_cache(async, db)
return "Deleted all ({}) reminders for {}!".format(count, nick)
# split the input on the first ":"
parts = text.split(":", 1)
if len(parts) == 1:
# user didn't add a message, send them help
notice(remind.__doc__)
return
if count > 10:
return "Sorry, you already have too many reminders queued (10), you will need to wait or " \
"clear your reminders to add any more."
time_string = parts[0].strip()
message = colors.strip_all(parts[1].strip())
# get the current time in both DateTime and Unix Epoch
current_epoch = time.time()
current_time = datetime.fromtimestamp(current_epoch)
# parse the time input, return error if invalid
seconds = time_parse(time_string)
if not seconds:
return "Invalid input."
if seconds > 2764800 or seconds < 60:
return "Sorry, remind input must be more then a minute, and less then one month."
# work out the time to remind the user, and check if that time is in the past
remind_time = datetime.fromtimestamp(current_epoch + seconds)
if remind_time < current_time:
return "I can't remind you in the past!"
# finally, add the reminder and send a confirmation message
yield from add_reminder(async, db, conn.name, nick, chan, message, remind_time, current_time)
yield from load_cache(async, db)
remind_text = format_time(seconds, count=2)
output = "Alright, I'll remind you \"{}\" in $(b){}$(clear)!".format(message, remind_text)
return colors.parse(output)
| gpl-3.0 |
Lilykos/invenio | invenio/modules/upgrader/upgrades/invenio_2013_09_13_new_bibEDITCACHE.py | 15 | 1408 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.legacy.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
return "New bibedit cache (bibEDITCACHE) table"
def do_upgrade():
""" Implement your upgrades here """
run_sql("""CREATE TABLE IF NOT EXISTS `bibEDITCACHE` (
`id_bibrec` mediumint(8) unsigned NOT NULL,
`uid` int(15) unsigned NOT NULL,
`data` LONGBLOB,
`post_date` datetime NOT NULL,
`is_active` tinyint(1) NOT NULL DEFAULT 1,
PRIMARY KEY (`id_bibrec`, `uid`),
INDEX `post_date` (`post_date`)
) ENGINE=MyISAM""")
def estimate():
""" Estimate running time of upgrade in seconds (optional). """
return 1
| gpl-2.0 |
angelicadly/prog-script | tekton-master/backend/appengine/routes/courses/rest.py | 28 | 1086 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from tekton.gae.middleware.json_middleware import JsonResponse
from course_app import facade
def index():
cmd = facade.list_courses_cmd()
course_list = cmd()
short_form=facade.course_short_form()
course_short = [short_form.fill_with_model(m) for m in course_list]
return JsonResponse(course_short)
def save(**course_properties):
cmd = facade.save_course_cmd(**course_properties)
return _save_or_update_json_response(cmd)
def update(course_id, **course_properties):
cmd = facade.update_course_cmd(course_id, **course_properties)
return _save_or_update_json_response(cmd)
def delete(course_id):
facade.delete_course_cmd(course_id)()
def _save_or_update_json_response(cmd):
try:
course = cmd()
except CommandExecutionException:
return JsonResponse({'errors': cmd.errors})
short_form=facade.course_short_form()
return JsonResponse(short_form.fill_with_model(course))
| mit |
swarna-k/MyDiary | flask/lib/python2.7/site-packages/wtforms/csrf/session.py | 193 | 3056 | """
A provided CSRF implementation which puts CSRF data in a session.
This can be used fairly comfortably with many `request.session` type
objects, including the Werkzeug/Flask session store, Django sessions, and
potentially other similar objects which use a dict-like API for storing
session keys.
The basic concept is a randomly generated value is stored in the user's
session, and an hmac-sha1 of it (along with an optional expiration time,
for extra security) is used as the value of the csrf_token. If this token
validates with the hmac of the random value + expiration time, and the
expiration time is not passed, the CSRF validation will pass.
"""
from __future__ import unicode_literals
import hmac
import os
from hashlib import sha1
from datetime import datetime, timedelta
from ..validators import ValidationError
from .core import CSRF
__all__ = ('SessionCSRF', )
class SessionCSRF(CSRF):
TIME_FORMAT = '%Y%m%d%H%M%S'
def setup_form(self, form):
self.form_meta = form.meta
return super(SessionCSRF, self).setup_form(form)
def generate_csrf_token(self, csrf_token_field):
meta = self.form_meta
if meta.csrf_secret is None:
raise Exception('must set `csrf_secret` on class Meta for SessionCSRF to work')
if meta.csrf_context is None:
raise TypeError('Must provide a session-like object as csrf context')
session = self.session
if 'csrf' not in session:
session['csrf'] = sha1(os.urandom(64)).hexdigest()
if self.time_limit:
expires = (self.now() + self.time_limit).strftime(self.TIME_FORMAT)
csrf_build = '%s%s' % (session['csrf'], expires)
else:
expires = ''
csrf_build = session['csrf']
hmac_csrf = hmac.new(meta.csrf_secret, csrf_build.encode('utf8'), digestmod=sha1)
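        # The returned token has the form "<expires>##<hex hmac>", e.g.
        # (illustrative) "20240101120000##5f2a...", with an empty expires part
        # when no time limit is configured.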
return '%s##%s' % (expires, hmac_csrf.hexdigest())
def validate_csrf_token(self, form, field):
meta = self.form_meta
if not field.data or '##' not in field.data:
raise ValidationError(field.gettext('CSRF token missing'))
expires, hmac_csrf = field.data.split('##', 1)
check_val = (self.session['csrf'] + expires).encode('utf8')
hmac_compare = hmac.new(meta.csrf_secret, check_val, digestmod=sha1)
if hmac_compare.hexdigest() != hmac_csrf:
raise ValidationError(field.gettext('CSRF failed'))
if self.time_limit:
now_formatted = self.now().strftime(self.TIME_FORMAT)
if now_formatted > expires:
raise ValidationError(field.gettext('CSRF token expired'))
def now(self):
"""
Get the current time. Used for test mocking/overriding mainly.
"""
return datetime.now()
@property
def time_limit(self):
return getattr(self.form_meta, 'csrf_time_limit', timedelta(minutes=30))
@property
def session(self):
return getattr(self.form_meta.csrf_context, 'session', self.form_meta.csrf_context)
| bsd-3-clause |
Sotera/aggregate-micro-paths | hive-streaming/AggregateMicroPath.py | 1 | 9018 | # Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
import sys
import subprocess
from optparse import OptionParser
import sys
#Add the conf path to our path so we can call the blanketconfig
sys.path.append('conf')
from config import AggregateMicroPathConfig
#Differences are the sort order and the table schema for creation
#
# Subprocess wrapper to exit on errors.
#
def subprocessCall(argsList,quitOnError=True,stdout=None):
returnCode = subprocess.call(argsList,stdout=stdout)
if (quitOnError and 0 != returnCode):
print "Error executing subprocess:\n"
print " ".join(argsList)
exit(1)
return returnCode
#
# print usage to command line and exit
#
def printUsageAndExit(parser):
parser.print_help()
exit(1)
#
# create a new hive table
#
def create_new_hive_table(database_name,table_name,table_schema):
hql_script = """
DROP TABLE """+database_name+"""."""+table_name+""";
CREATE TABLE """+database_name+"""."""+table_name+""" ( """+table_schema+""" )
;"""
subprocessCall(["hive","-e",hql_script])
#
# Extract paths from conf/osm.ini initial data and store into a new table
#
def extract_paths(conf):
table_schema = "id string, alat string, blat string, alon string, blon string, adt string, bdt string, time string, distance string, velocity string"
create_new_hive_table(conf.database_name,"micro_path_track_extract_" + conf.table_name,table_schema)
#hadoop streaming to extract paths
hql_script = """
set mapred.reduce.tasks=96;
set mapred.map.tasks=96;
ADD FILES conf/config.py conf/"""+conf.config_file+""" scripts/extract_path_segments.py;
FROM(
SELECT """+conf.table_schema_id+""","""+conf.table_schema_dt+""","""+conf.table_schema_lat+""","""+conf.table_schema_lon+"""
FROM """ + conf.database_name + """.""" + conf.table_name + """
DISTRIBUTE BY """+conf.table_schema_id+"""
SORT BY """+conf.table_schema_id+""","""+conf.table_schema_dt+""" asc
) map_out
INSERT OVERWRITE TABLE """ + conf.database_name + """.micro_path_track_extract_""" + conf.table_name + """
SELECT TRANSFORM(map_out."""+conf.table_schema_id+""", map_out."""+conf.table_schema_dt+""", map_out."""+conf.table_schema_lat+""", map_out."""+conf.table_schema_lon+""")
USING \"python extract_path_segments.py """ + conf.config_file + """\"
AS id,alat,blat,alon,blon,adt,bdt,time,distance,velocity
;
"""
subprocessCall(["hive","-e",hql_script])
#
# Extract trip line intersects from paths
#
def extract_trip_line_intersects(configuration):
table_schema = "intersectX string, intersectY string, dt string, velocity float, direction float, track_id string"
create_new_hive_table(configuration.database_name,"micro_path_tripline_bins_" + configuration.table_name,table_schema)
#hadoop streaming to extract paths
hql_script = """
ADD FILES conf/config.py scripts/tripline_bins.py conf/"""+configuration.config_file+""";
FROM """ + configuration.database_name + """.micro_path_track_extract_""" + configuration.table_name + """
INSERT OVERWRITE TABLE """ + configuration.database_name + """.micro_path_tripline_bins_""" + configuration.table_name + """
SELECT TRANSFORM(alat, alon, blat, blon, adt, bdt, velocity, id)
USING \"python tripline_bins.py """ + configuration.config_file + """ \"
AS intersectX,intersectY,dt,velocity,direction,track_id
;
"""
print("***hql_script***")
print(str(hql_script))
subprocessCall(["hive","-e",hql_script])
#
# take values form micro_path_tripline_bins and aggregate the counts
#
def aggregate_intersection_list(configuration):
table_schema ="x string, y string, ids string, dt string"
create_new_hive_table(configuration.database_name,"micro_path_intersect_list_" + configuration.table_name,table_schema)
#hadoop streaming to extract paths
hql_script = """
set mapred.map.tasks=96;
set mapred.reduce.tasks=96;
INSERT OVERWRITE TABLE """ + configuration.database_name + """.micro_path_intersect_list_""" + configuration.table_name + """
SELECT
intersectX,intersectY,dt
STUFF((
SELECT ', ' + [Name] + ':' + CAST([Value] AS VARCHAR(MAX))
FROM #YourTable
WHERE (ID = Results.ID)
FOR XML PATH(''),TYPE).value('(./text())[1]','VARCHAR(MAX)')
,1,2,'') AS ids
FROM """ + configuration.database_name + """.micro_path_tripline_bins_""" + configuration.table_name + """
GROUP BY intersectX,intersectY,dt
SELECT intersectX,intersectY,ids,dt
FROM """ + configuration.database_name + """.micro_path_tripline_bins_""" + configuration.table_name + """
GROUP BY intersectX,intersectY,dt
;
"""
subprocessCall(["hive","-e",hql_script])
#
# take values form micro_path_tripline_bins and aggregate the counts
#
def aggregate_intersection_points(configuration):
table_schema ="x string, y string, value int, dt string"
create_new_hive_table(configuration.database_name,"micro_path_intersect_counts_" + configuration.table_name,table_schema)
#hadoop streaming to extract paths
hql_script = """
set mapred.map.tasks=96;
set mapred.reduce.tasks=96;
INSERT OVERWRITE TABLE """ + configuration.database_name + """.micro_path_intersect_counts_""" + configuration.table_name + """
SELECT intersectX,intersectY,count(1),dt
FROM """ + configuration.database_name + """.micro_path_tripline_bins_""" + configuration.table_name + """
GROUP BY intersectX,intersectY,dt
;
"""
subprocessCall(["hive","-e",hql_script])
def aggregate_intersection_velocity(configuration):
table_schema ="x string, y string, velocity float, dt string"
create_new_hive_table(configuration.database_name,"micro_path_intersect_velocity_" + configuration.table_name,table_schema)
#hadoop streaming to extract paths
hql_script = """
set mapred.map.tasks=96;
set mapred.reduce.tasks=96;
INSERT OVERWRITE TABLE """ + configuration.database_name + """.micro_path_intersect_velocity_""" + configuration.table_name + """
SELECT intersectX,intersectY,avg(velocity),dt
FROM """ + configuration.database_name + """.micro_path_tripline_bins_""" + configuration.table_name + """
GROUP BY intersectX,intersectY,dt
;
"""
subprocessCall(["hive","-e",hql_script])
def aggregate_intersection_direction(configuration):
table_schema ="x string, y string, direction int, dt string"
create_new_hive_table(configuration.database_name,"micro_path_intersect_direction_" + configuration.table_name,table_schema)
#hadoop streaming to extract paths
hql_script = """
set mapred.map.tasks=96;
set mapred.reduce.tasks=96;
INSERT OVERWRITE TABLE """ + configuration.database_name + """.micro_path_intersect_direction_""" + configuration.table_name + """
SELECT intersectX,intersectY,avg(direction),dt
FROM """ + configuration.database_name + """.micro_path_tripline_bins_""" + configuration.table_name + """
GROUP BY intersectX,intersectY,dt
;
"""
subprocessCall(["hive","-e",hql_script])
#
#
#
def main(config_file):
start_time = time()
print('Start time: ' + str(start_time))
print("Loading config from conf/[{0}]").format(config_file)
configuration = AggregateMicroPathConfig(config_file, "conf/")
print("extracting path data")
# create a new table and extract path data
extract_paths(configuration)
# emit points where segemnts intersect with trip line blankets
print("emit trip line blanket intersects")
extract_trip_line_intersects(configuration)
# aggregate intersection points
print ("aggregate intersection points")
aggregate_intersection_points(configuration)
# aggregate intersection velocity
print ("aggregate intersection velocity")
aggregate_intersection_velocity(configuration)
# aggregate intersection vdirection
print ("aggregate intersection direction")
aggregate_intersection_direction(configuration)
print('End time: ' + str(time() - start_time))
#
# Process command line arguments and run main
#
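# Example invocation (config name is illustrative; the file is resolved
# relative to conf/): python AggregateMicroPath.py -c osm.ini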
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-c","--config",
dest="configFile",
help="REQUIRED: name of configuration file")
(options,args) = parser.parse_args()
if not options.configFile:
printUsageAndExit(parser)
main(options.configFile)
| apache-2.0 |
PongPi/isl-odoo | addons/account/wizard/account_report_common_journal.py | 385 | 2942 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_journal_report(osv.osv_memory):
_name = 'account.common.journal.report'
_description = 'Account Common Journal Report'
_inherit = "account.common.report"
_columns = {
'amount_currency': fields.boolean("With Currency", help="Print Report with the currency column if the currency differs from the company currency."),
}
def _build_contexts(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
result = super(account_common_journal_report, self)._build_contexts(cr, uid, ids, data, context=context)
if data['form']['filter'] == 'filter_date':
cr.execute('SELECT period_id FROM account_move_line WHERE date >= %s AND date <= %s', (data['form']['date_from'], data['form']['date_to']))
result['periods'] = map(lambda x: x[0], cr.fetchall())
elif data['form']['filter'] == 'filter_period':
result['periods'] = self.pool.get('account.period').build_ctx_periods(cr, uid, data['form']['period_from'], data['form']['period_to'])
return result
def pre_print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data['form'].update(self.read(cr, uid, ids, ['amount_currency'], context=context)[0])
fy_ids = data['form']['fiscalyear_id'] and [data['form']['fiscalyear_id']] or self.pool.get('account.fiscalyear').search(cr, uid, [('state', '=', 'draft')], context=context)
period_list = data['form']['periods'] or self.pool.get('account.period').search(cr, uid, [('fiscalyear_id', 'in', fy_ids)], context=context)
data['form']['active_ids'] = self.pool.get('account.journal.period').search(cr, uid, [('journal_id', 'in', data['form']['journal_ids']), ('period_id', 'in', period_list)], context=context)
return data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Multiscale-Genomics/mg-process-fastq | process_mnaseseq.py | 1 | 6459 | #!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from utils import remap
from tool.bwa_aligner import bwaAlignerTool
from tool.inps import inps
# ------------------------------------------------------------------------------
class process_mnaseseq(Workflow):
"""
Functions for downloading and processing Mnase-seq FastQ files. Files are
downloaded from the European Nucleotide Archive (ENA), then aligned,
filtered and analysed for peak calling
"""
def __init__(self, configuration=None):
"""
Initialise the class
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("Processing MNase-Seq")
if configuration is None:
configuration = {}
self.configuration.update(configuration)
def run(self, input_files, metadata, output_files):
"""
Main run function for processing MNase-Seq FastQ data. Pipeline aligns
the FASTQ files to the genome using BWA. iNPS is then used for peak
calling to identify nucleosome position sites within the genome.
Parameters
----------
        input_files : dict
            Dictionary of input file locations
        metadata : dict
            Required meta data
Returns
-------
        output_files : dict
            Locations for the output bam, bai and bed files
"""
output_metadata = {}
if "genome_public" in input_files:
align_input_files = remap(
input_files, genome="genome_public", loc="loc", index="index_public")
align_input_file_meta = remap(
metadata, genome="genome_public", loc="loc", index="index_public")
else:
align_input_files = remap(input_files, "genome", "loc", "index")
align_input_file_meta = remap(metadata, "genome", "loc", "index")
bwa = bwaAlignerTool()
logger.progress("BWA ALN Aligner", status="RUNNING")
bwa_files, bwa_meta = bwa.run(
align_input_files, align_input_file_meta,
{"output": output_files["bam"], "bai": output_files["bai"]}
)
logger.progress("BWA ALN Aligner", status="DONE")
output_files_generated = {}
try:
output_files_generated["bam"] = bwa_files["bam"]
output_metadata["bam"] = bwa_meta["bam"]
tool_name = output_metadata['bam'].meta_data['tool']
output_metadata['bam'].meta_data['tool_description'] = tool_name
output_metadata['bam'].meta_data['tool'] = "process_mnaseseq"
output_files_generated["bai"] = bwa_files["bai"]
output_metadata["bai"] = bwa_meta["bai"]
tool_name = output_metadata['bai'].meta_data['tool']
output_metadata['bai'].meta_data['tool_description'] = tool_name
output_metadata['bai'].meta_data['tool'] = "process_mnaseseq"
except KeyError:
logger.fatal("BWA Alignment failed")
inps_tool = inps()
logger.progress("iNPS Peak Caller", status="RUNNING")
inps_files, inps_meta = inps_tool.run(
remap(bwa_files, "bam"),
remap(bwa_meta, "bam"),
{"bed": output_files["bed"]}
)
logger.progress("iNPS Peak Caller", status="DONE")
try:
output_files_generated["bed"] = inps_files["bed"]
output_metadata["bed"] = inps_meta["bed"]
tool_name = output_metadata['bed'].meta_data['tool']
output_metadata['bed'].meta_data['tool_description'] = tool_name
output_metadata['bed'].meta_data['tool'] = "process_mnaseseq"
except KeyError:
logger.fatal("BWA Alignment failed")
print("MNASESEQ RESULTS:", output_metadata)
return output_files, output_metadata
# ------------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
-------------
This function launches the app using configuration written in
two json files: config.json and input_metadata.json.
"""
# 1. Instantiate and launch the App
print("1. Instantiate and launch the App")
from apps.jsonapp import JSONApp
app = JSONApp()
result = app.launch(process_mnaseseq,
config,
in_metadata,
out_metadata)
# 2. The App has finished
print("2. Execution finished; see " + out_metadata)
print(result)
return result
# ------------------------------------------------------------------------------
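# Example invocation (file names are illustrative):
#   python process_mnaseseq.py --config config.json \
#          --in_metadata input_metadata.json --out_metadata out_metadata.json --local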
if __name__ == "__main__":
# Set up the command line parameters
PARSER = argparse.ArgumentParser(description="MNase-seq peak calling")
PARSER.add_argument(
"--config", help="Configuration file")
PARSER.add_argument(
"--in_metadata", help="Location of input metadata file")
PARSER.add_argument(
"--out_metadata", help="Location of output metadata file")
PARSER.add_argument(
"--local", action="store_const", const=True, default=False)
# Get the matching parameters from the command line
ARGS = PARSER.parse_args()
CONFIG = ARGS.config
IN_METADATA = ARGS.in_metadata
OUT_METADATA = ARGS.out_metadata
LOCAL = ARGS.local
if LOCAL:
import sys
sys._run_from_cmdl = True # pylint: disable=protected-access
RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
print(RESULTS)
| apache-2.0 |
rakeshmi/cinder | cinder/volume/drivers/netapp/dataontap/iscsi_7mode.py | 7 | 3256 | # Copyright (c) 2014 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (7-mode) iSCSI storage systems.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_7mode
LOG = logging.getLogger(__name__)
class NetApp7modeISCSIDriver(driver.ISCSIDriver):
"""NetApp 7-mode iSCSI volume driver."""
DRIVER_NAME = 'NetApp_iSCSI_7mode_direct'
def __init__(self, *args, **kwargs):
super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs)
self.library = block_7mode.NetAppBlockStorage7modeLibrary(
self.DRIVER_NAME, 'iSCSI', **kwargs)
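    # The methods below are thin wrappers that delegate every operation to the
    # shared 7-mode block storage library configured above.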
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh)
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_iscsi(volume, connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_iscsi(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
| apache-2.0 |
LucianU/ThinkStats2 | code/probability.py | 69 | 4286 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import nsfg
import first
import thinkstats2
import thinkplot
def MakeHists(live):
"""Plot Hists for live births
live: DataFrame
"""
hist = thinkstats2.Hist(np.floor(live.agepreg), label='agepreg')
thinkplot.PrePlot(2, cols=2)
thinkplot.SubPlot(1)
thinkplot.Hist(hist)
thinkplot.Config(xlabel='years',
ylabel='frequency',
axis=[0, 45, 0, 700])
thinkplot.SubPlot(2)
thinkplot.Pmf(hist)
thinkplot.Save(root='probability_agepreg_hist',
xlabel='years',
axis=[0, 45, 0, 700])
def MakeFigures(firsts, others):
"""Plot Pmfs of pregnancy length.
firsts: DataFrame
others: DataFrame
"""
# plot the PMFs
first_pmf = thinkstats2.Pmf(firsts.prglngth, label='first')
other_pmf = thinkstats2.Pmf(others.prglngth, label='other')
width = 0.45
thinkplot.PrePlot(2, cols=2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Config(xlabel='weeks',
ylabel='probability',
axis=[27, 46, 0, 0.6])
thinkplot.PrePlot(2)
thinkplot.SubPlot(2)
thinkplot.Pmfs([first_pmf, other_pmf])
thinkplot.Save(root='probability_nsfg_pmf',
xlabel='weeks',
axis=[27, 46, 0, 0.6])
# plot the differences in the PMFs
weeks = range(35, 46)
diffs = []
for week in weeks:
p1 = first_pmf.Prob(week)
p2 = other_pmf.Prob(week)
diff = 100 * (p1 - p2)
diffs.append(diff)
thinkplot.Bar(weeks, diffs)
thinkplot.Save(root='probability_nsfg_diffs',
title='Difference in PMFs',
xlabel='weeks',
ylabel='percentage points',
legend=False)
def BiasPmf(pmf, label=''):
"""Returns the Pmf with oversampling proportional to value.
If pmf is the distribution of true values, the result is the
distribution that would be seen if values are oversampled in
proportion to their values; for example, if you ask students
how big their classes are, large classes are oversampled in
proportion to their size.
Args:
pmf: Pmf object.
label: string label for the new Pmf.
Returns:
Pmf object
"""
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
def UnbiasPmf(pmf, label=''):
"""Returns the Pmf with oversampling proportional to 1/value.
Args:
pmf: Pmf object.
label: string label for the new Pmf.
Returns:
Pmf object
"""
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, 1.0/x)
new_pmf.Normalize()
return new_pmf
def ClassSizes():
"""Generate PMFs of observed and actual class size.
"""
# start with the actual distribution of class sizes from the book
d = { 7: 8, 12: 8, 17: 14, 22: 4,
27: 6, 32: 12, 37: 8, 42: 3, 47: 2 }
# form the pmf
pmf = thinkstats2.Pmf(d, label='actual')
print('mean', pmf.Mean())
print('var', pmf.Var())
# compute the biased pmf
biased_pmf = BiasPmf(pmf, label='observed')
print('mean', biased_pmf.Mean())
print('var', biased_pmf.Var())
# unbias the biased pmf
unbiased_pmf = UnbiasPmf(biased_pmf, label='unbiased')
print('mean', unbiased_pmf.Mean())
print('var', unbiased_pmf.Var())
# plot the Pmfs
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased_pmf])
thinkplot.Save(root='class_size1',
xlabel='class size',
ylabel='PMF',
axis=[0, 52, 0, 0.27])
def main(script):
live, firsts, others = first.MakeFrames()
MakeFigures(firsts, others)
MakeHists(live)
ClassSizes()
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
txemi/ansible | lib/ansible/utils/module_docs_fragments/openswitch.py | 166 | 4001 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport. Note this argument
        does not affect the SSH transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device. This value applies to either I(cli) or I(rest). The port
value will default to the appropriate transport common port if
none is provided in the task. (cli=22, http=80, https=443). Note
this argument does not affect the SSH transport.
required: false
default: 0 (use common port)
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
either the CLI login or the eAPI authentication depending on which
transport is used. Note this argument does not affect the SSH
transport. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
required: false
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This is a common argument used for either I(cli)
or I(rest) transports. Note this argument does not affect the SSH
transport. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
required: false
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
    required: false
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This argument is only used for the I(cli)
transports. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
required: false
transport:
description:
- Configures the transport connection to use when connecting to the
remote device. The transport argument supports connectivity to the
device over ssh, cli or REST.
required: true
default: ssh
choices: ['ssh', 'cli', 'rest']
use_ssl:
description:
- Configures the I(transport) to use SSL if set to true only when the
I(transport) argument is configured as rest. If the transport
argument is not I(rest), this value is ignored.
required: false
default: yes
choices: ['yes', 'no']
provider:
description:
- Convenience method that allows all I(openswitch) arguments to be passed as
a dict object. All constraints (required, choices, etc) must be
met either by individual arguments or values in this dict.
required: false
default: null
"""
| gpl-3.0 |
phalt/django | tests/admin_views/test_templatetags.py | 28 | 1026 | from __future__ import unicode_literals
from django.contrib.admin.templatetags.admin_modify import submit_row
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.test import RequestFactory
from django.urls import reverse
from .admin import site
from .tests import AdminViewBasicTestCase
class AdminTemplateTagsTest(AdminViewBasicTestCase):
def test_submit_row(self):
"""
submit_row template tag should pass whole context.
"""
factory = RequestFactory()
request = factory.get(reverse('admin:auth_user_change', args=[self.superuser.pk]))
request.user = self.superuser
admin = UserAdmin(User, site)
extra_context = {'extra': True}
response = admin.change_view(request, str(self.superuser.pk), extra_context=extra_context)
template_context = submit_row(response.context_data)
self.assertIs(template_context['extra'], True)
self.assertIs(template_context['show_save'], True)
| bsd-3-clause |
Nirvedh/CoarseCoherence | tests/quick/se/02.insttest/test.py | 56 | 1707 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
root.system.cpu[0].workload = LiveProcess(cmd = 'insttest',
executable = binpath('insttest'))
| bsd-3-clause |
alff0x1f/Misago | misago/users/api/userendpoints/signature.py | 8 | 2049 | from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from rest_framework import status
from rest_framework.response import Response
from misago.conf import settings
from misago.core.utils import format_plaintext_for_html
from misago.users.forms.options import EditSignatureForm
from misago.users.signatures import is_user_signature_valid, set_user_signature
def signature_endpoint(request):
user = request.user
if not user.acl['can_have_signature']:
raise PermissionDenied(
_("You don't have permission to change signature."))
if user.is_signature_locked:
if user.signature_lock_user_message:
reason = format_plaintext_for_html(
user.signature_lock_user_message)
else:
reason = None
return Response({
'detail': _("Your signature is locked. You can't change it."),
'reason': reason
},
status=status.HTTP_403_FORBIDDEN)
if request.method == 'POST':
return edit_signature(request, user)
return get_signature_options(user)
def get_signature_options(user):
options = {
'signature': None,
'limit': settings.signature_length_max,
}
if user.signature:
options['signature'] = {
'plain': user.signature,
'html': user.signature_parsed,
}
if not is_user_signature_valid(user):
options['signature']['html'] = None
return Response(options)
def edit_signature(request, user):
form = EditSignatureForm(request.data, instance=user)
if form.is_valid():
set_user_signature(
request, user, form.cleaned_data['signature'])
user.save(update_fields=[
'signature', 'signature_parsed', 'signature_checksum'
])
return get_signature_options(user)
else:
return Response({'detail': form.non_field_errors()[0]},
status=status.HTTP_400_BAD_REQUEST)
| gpl-2.0 |
0k/OpenUpgrade | addons/l10n_be_hr_payroll_account/__init__.py | 430 | 1046 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
500neo/tomorrow-theme | ipythonqt/tomorrownight.py | 31 | 2459 | # -*- coding: utf-8 -*-
"""
    pygments.styles.tomorrownight
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tomorrow Night theme for ipython qtconsole (invoke with
ipython qtconsole --style=tomorrownight)
See https://github.com/chriskempson/tomorrow-theme for theme info
:copyright: Copyright 2012 André Risnes, [email protected]
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Text, Punctuation
class TomorrownightStyle(Style):
"""
Tomorrow Night theme for ipython qtconsole
"""
background_color = '#1d1f21'
highlight_color = '#373b41'
styles = {
Whitespace: background_color,
Text: '#c5c8c6',
Punctuation: '#81a2be',
Comment: '#b5bd68',
Comment.Preproc: 'italic #b5bd68',
Comment.Special: 'italic #b5bd68',
Keyword: '#81a2be',
Keyword.Type: '#f0c674',
Keyword.Namespace: '#de935f',
Operator.Word: '#81a2be',
Name: '#de935f',
Name.Builtin: '#de935f',
Name.Function: '#8abeb7',
Name.Class: '#f0c674',
Name.Namespace: '#81a2be',
Name.Variable: '#de935f',
Name.Constant: '#c5c8c6',
Name.Entity: 'bold #00aaaa',
Name.Attribute: '#de935f',
Name.Tag: 'bold #b5bd68',
Name.Decorator: '#cc6666',
String: '#b5bd68',
String.Symbol: '#b5bd68',
String.Regex: '#b5bd68',
Number: '#cc6666',
Generic.Heading: 'bold #c5c8c6',
Generic.Subheading: 'bold #c5c8c6',
Generic.Deleted: '#de935f',
Generic.Inserted: '#8abeb7',
Generic.Error: '#cc6666',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#b5bd68',
Generic.Output: '#c5c8c6',
Generic.Traceback: '#c5c8c6',
Error: '#cc6666'
}
| mit |
llvm-mirror/lldb | packages/Python/lldbsuite/test/lang/objc/ivar-IMP/TestObjCiVarIMP.py | 5 | 1484 | """
Test that dynamically discovered ivars of type IMP do not crash LLDB
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ObjCiVarIMPTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@skipIf(archs=['i386']) # objc file does not build for i386
@no_debug_info_test
def test_imp_ivar_type(self):
"""Test that dynamically discovered ivars of type IMP do not crash LLDB"""
self.build()
exe = self.getBuildArtifact("a.out")
# Create a target from the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Set up our breakpoint
bkpt = lldbutil.run_break_set_by_source_regexp(self, "break here")
# Now launch the process, and do not stop at the entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
self.expect(
'frame variable --ptr-depth=1 --show-types -d run -- object',
substrs=[
'(MyClass *) object = 0x',
'(void *) myImp = 0x'])
self.expect(
'disassemble --start-address `((MyClass*)object)->myImp`',
substrs=['-[MyClass init]'])
| apache-2.0 |
matrixise/odoo | addons/crm_project_issue/project_issue.py | 380 | 2373 |
from openerp.osv import osv, fields
class crm_lead_to_project_issue_wizard(osv.TransientModel):
""" wizard to convert a Lead into a Project Issue and move the Mail Thread """
_name = "crm.lead2projectissue.wizard"
_inherit = 'crm.partner.binding'
_columns = {
"lead_id": fields.many2one("crm.lead", "Lead", domain=[("type", "=", "lead")]),
"project_id": fields.many2one("project.project", "Project", domain=[("use_issues", "=", True)])
}
_defaults = {
"lead_id": lambda self, cr, uid, context=None: context.get('active_id')
}
def action_lead_to_project_issue(self, cr, uid, ids, context=None):
# get the wizards and models
wizards = self.browse(cr, uid, ids, context=context)
Lead = self.pool["crm.lead"]
Issue = self.pool["project.issue"]
for wizard in wizards:
# get the lead to transform
lead = wizard.lead_id
partner = self._find_matching_partner(cr, uid, context=context)
if not partner and (lead.partner_name or lead.contact_name):
partner_ids = Lead.handle_partner_assignation(cr, uid, [lead.id], context=context)
partner = partner_ids[lead.id]
# create new project.issue
vals = {
"name": lead.name,
"description": lead.description,
"email_from": lead.email_from,
"project_id": wizard.project_id.id,
"partner_id": partner,
"user_id": None
}
issue_id = Issue.create(cr, uid, vals, context=None)
# move the mail thread
Lead.message_change_thread(cr, uid, lead.id, issue_id, "project.issue", context=context)
# delete the lead
Lead.unlink(cr, uid, [lead.id], context=None)
# return the action to go to the form view of the new Issue
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('model', '=', 'project.issue'), ('name', '=', 'project_issue_form_view')])
return {
'name': 'Issue created',
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'res_model': 'project.issue',
'type': 'ir.actions.act_window',
'res_id': issue_id,
'context': context
}
| agpl-3.0 |
shacker/django | tests/aggregation/tests.py | 21 | 46656 | import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField,
Max, Min, Sum, Value,
)
from django.test import TestCase
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
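        # Shared fixture: authors (with friendships), publishers, books and
        # stores reused by every aggregation test in this class.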
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
'Using an aggregate in order_by() without also including it in '
'annotate() is not allowed: Avg(F(book__rating)'
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values('age').order_by(Avg('book__rating'))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(self.b1.id, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg('authors__age'))
.values('pk', 'isbn', 'mean_age')
)
self.assertEqual(
list(books), [
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
]
)
books = (
Book.objects
.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]['sql']
self.assertIn('SELECT COUNT(*) ', sql)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 2},
]
)
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 1},
{'rating': 4.0, 'count': 2},
]
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration')),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum('age'))
self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = (
Author.objects
.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': self.p4.id,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': self.p3.id,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': self.p1.id,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': self.p2.id,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books), [
(self.b1.id, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, float)
self.assertEqual(v, Approximate(47.39, places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
msg = 'Expression contains mixed types. You must set output_field.'
qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price'))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))
def test_multi_arg_aggregate(self):
class MyMax(Max):
output_field = DecimalField()
def as_sql(self, compiler, connection):
copy = self.copy()
copy.set_source_expressions(copy.get_source_expressions()[0:1])
return super(MyMax, copy).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Book.objects.aggregate(MyMax('pages', 'price'))
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Book.objects.annotate(MyMax('pages', 'price'))
Book.objects.aggregate(max_field=MyMax('pages', 'price'))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {'function': self.function.lower(), 'expressions': sql}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra['function'] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {'function': 'MAX', 'expressions': '2'}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
def as_sqlite(self, compiler, connection):
return super().as_sql(compiler, connection, function='MAX')
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = 'QuerySet.aggregate() received non-expression(s): %s.'
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])):
Book.objects.aggregate(FloatField(), Avg('price'), is_book=True)
| bsd-3-clause |
dymkowsk/mantid | scripts/Interface/reduction_gui/reduction/reflectometer/refl_sf_calculator_data_series.py | 2 | 2342 | """
    Classes for each reduction step. These are kept separately
    from the interface class so that the HFIRReduction class could
be used independently of the interface implementation
"""
from __future__ import (absolute_import, division, print_function)
import xml.dom.minidom
from reduction_gui.reduction.scripter import BaseScriptElement
from reduction_gui.reduction.reflectometer.refl_sf_calculator_data_script import DataSets as REFLDataSets
class DataSeries(BaseScriptElement):
data_sets = []
def __init__(self, data_class=REFLDataSets):
super(DataSeries, self).__init__()
self._data_class = data_class
self.reset()
def to_script(self):
"""
Generate reduction script
@param execute: if true, the script will be executed
"""
script = ""
for item in self.data_sets:
script += item.to_script()
script += "\n"
return script
def update(self):
"""
Update transmission from reduction output
"""
pass
def to_xml(self):
"""
Create XML from the current data.
"""
xml = "<DataSeries>\n"
for item in self.data_sets:
xml += item.to_xml()
xml += "</DataSeries>\n"
return xml
def from_xml(self, xml_str):
"""
Read in data from XML
@param xml_str: text to read the data from
"""
self.reset()
self.data_sets = []
dom = xml.dom.minidom.parseString(xml_str)
# # Get Mantid version
# mtd_version = BaseScriptElement.getMantidBuildVersion(dom)
self._data_class = REFLDataSets
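        # Look for the current "Data" elements first and fall back to the
        # legacy "RefLSFCalculator" tag name for older files.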
element_list = dom.getElementsByTagName("Data")
if len(element_list)==0:
element_list = dom.getElementsByTagName("RefLSFCalculator")
if len(element_list)>0:
for item in element_list:
if item is not None:
data_set = self._data_class()
data_set.from_xml_element(item)
self.data_sets.append(data_set)
if len(self.data_sets)==0:
self.data_sets = [self._data_class()]
def reset(self):
"""
Reset state
"""
self.data_sets = [self._data_class()]
| gpl-3.0 |
pretyman/hunter | scripts/upload-cache-to-github.py | 3 | 16852 | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import hashlib
import json
import os
import requests
import sys
import time
if os.getenv('HUNTER_GIT_EXECUTABLE'):
os.environ["GIT_PYTHON_GIT_EXECUTABLE"] = os.getenv('HUNTER_GIT_EXECUTABLE')
try:
import git
except ImportError as exc:
print("Import failed with error: {}".format(exc))
print("Possible fixes:")
print(" * Install gitpython module: 'pip install gitpython'")
print(" * Set environment variable HUNTER_GIT_EXECUTABLE")
sys.exit(1)
class Error(Exception):
pass
def sleep_time(attempt):
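    # Backoff schedule in seconds used by retry(): immediate retry after the
    # first failure, then progressively longer waits capped at 1200 seconds.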
if attempt <= 0:
raise Exception('Unexpected')
if attempt == 1:
return 0
if attempt == 2:
return 15
if attempt == 3:
return 60
if attempt == 4:
return 90
if attempt == 5:
return 300
return 1200
def retry(func_in):
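    # Decorator: retry the wrapped call (up to 10 retries after the initial
    # attempt) using the sleep_time() backoff; Error is treated as fatal and
    # re-raised immediately.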
def func_out(*args, **kwargs):
retry_max = 10
i = 0
while True:
i = i + 1
try:
return func_in(*args, **kwargs)
except Error as err:
# Treat Errors as fatal and do not retry.
# Also explicitly flush message to avoid "no output" issue on some CIs.
print('Error:\n {}'.format(err))
sys.stdout.flush()
raise err
except Exception as exc:
if i > retry_max:
raise exc
print('Operation failed. Exception:\n {}'.format(exc))
sec = sleep_time(i)
print('Retry #{} (of {}) after {} seconds'.format(i, retry_max, sec))
sys.stdout.flush()
time.sleep(sec)
return func_out
class Github:
def __init__(self, username, password, repo_owner, repo):
self.repo_owner = repo_owner
self.repo = repo
self.username = username
self.password = password
self.auth = requests.auth.HTTPBasicAuth(username, password)
self.simple_request()
@retry
def simple_request(self):
print('Processing simple request')
r = requests.get('https://api.github.com', auth=self.auth)
if not r.ok:
sys.exit('Simple request fails. Check your password.')
limit = int(r.headers['X-RateLimit-Remaining'])
print('GitHub Limit: {}'.format(limit))
if limit == 0:
raise Exception('GitHub limit is 0')
print('Simple request pass')
@retry
def get_release_by_tag(self, tagname):
print('Get release-id by tag `{}`'.format(tagname))
# https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
# GET /repos/:owner/:repo/releases/tags/:tag
url = 'https://api.github.com/repos/{}/{}/releases/tags/{}'.format(
self.repo_owner,
self.repo,
tagname
)
r = requests.get(url, auth=self.auth)
if r.status_code == 404:
# Create release if not exists
# https://developer.github.com/v3/repos/releases/#create-a-release
# POST /repos/:owner/:repo/releases
post_url = 'https://api.github.com/repos/{}/{}/releases'.format(
self.repo_owner,
self.repo,
)
tag_data = "{" + '"tag_name": "{}"'.format(tagname) + "}"
r = requests.post(post_url, data=tag_data, auth=self.auth)
repo_name = "https://github.com/{}/{}".format(
self.repo_owner, self.repo
)
if r.status_code == 404:
raise Error(
"Repository not found '{}' or user '{}' has no access to it".
format(repo_name, self.username)
)
if r.status_code == 422:
raise Error(
"Please create at least one file in repository '{}'".
format(repo_name)
)
if not r.status_code == 201:
raise Error("Unexpected status code: {}".format(r.status_code))
if not r.ok:
raise Error("Can't create release tag {}".format(tagname))
r = requests.get(url, auth=self.auth)
if not r.ok:
raise Exception(
'Get release id failed. Status code: {}. Requested url: {}'.format(
r.status_code, url))
release_id = r.json()['id']
upload_url = r.json()['upload_url']
uri_template_vars = '{?name,label}'
if uri_template_vars not in upload_url:
raise Exception('Unsupported upload URI template: {}'.format(upload_url))
upload_url = upload_url.replace(uri_template_vars, '?name={}')
print('Release id: {}'.format(release_id))
print('Upload URL: {}'.format(upload_url))
return release_id, upload_url
@retry
def find_asset_id_by_name(self, release_id, name):
# https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
# GET /repos/:owner/:repo/releases/:id/assets
page_number = 1
keep_searching = True
while keep_searching:
url = 'https://api.github.com/repos/{}/{}/releases/{}/assets?page={}'.format(
self.repo_owner,
self.repo,
release_id,
page_number
)
print('Requesting URL: {}'.format(url))
r = requests.get(url, auth=self.auth)
if not r.ok:
raise Exception('Getting list of assets failed. Requested url: {}'.format(url))
json = r.json()
for x in json:
if name == x['name']:
return x['id']
if not json:
keep_searching = False
page_number = page_number + 1
return None
@retry
def delete_asset_by_id(self, asset_id, asset_name):
# https://developer.github.com/v3/repos/releases/#delete-a-release-asset
# DELETE /repos/:owner/:repo/releases/assets/:id
url = 'https://api.github.com/repos/{}/{}/releases/assets/{}'.format(
self.repo_owner,
self.repo,
asset_id
)
r = requests.delete(url, auth=self.auth)
if r.status_code == 204:
print('Asset removed: {}'.format(asset_name))
else:
raise Exception('Deletion of asset failed: {}'.format(asset_name))
def delete_asset_if_exists(self, release_id, asset_name):
asset_id = self.find_asset_id_by_name(release_id, asset_name)
if not asset_id:
print('Asset not exists: {}'.format(asset_name))
return
self.delete_asset_by_id(asset_id, asset_name)
def upload_bzip_once(self, url, local_path):
headers = {'Content-Type': 'application/x-bzip2'}
file_to_upload = open(local_path, 'rb')
r = requests.post(url, data=file_to_upload, headers=headers, auth=self.auth)
if not r.ok:
raise Exception('Upload of file failed')
@retry
def upload_bzip(self, url, local_path, release_id, asset_name):
print('Uploading:\n {}\n -> {}'.format(local_path, url))
try:
self.upload_bzip_once(url, local_path)
except Exception as exc:
            print('Exception caught while uploading, removing asset...')
self.delete_asset_if_exists(release_id, asset_name)
raise exc
def upload_raw_file(self, local_path):
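        # The asset is named after the SHA1 of the file contents and attached
        # to a release tagged 'cache-<first 7 hex digits of that hash>'.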
asset_name = hashlib.sha1(open(local_path, 'rb').read()).hexdigest()
tagname = 'cache-{}'.format(asset_name[0:7])
asset_name = asset_name + '.tar.bz2'
release_id, upload_url = self.get_release_by_tag(tagname)
# https://developer.github.com/v3/repos/releases/#upload-a-release-asset
# POST to upload_url received in the release description
# in get_release_by_tag()
url = upload_url.format(asset_name)
self.upload_bzip(url, local_path, release_id, asset_name)
class CacheEntry:
def __init__(self, cache_done_path, cache_dir):
self.cache_dir = cache_dir
self.cache_raw = os.path.join(self.cache_dir, 'raw')
self.cache_done_path = cache_done_path
if not os.path.exists(cache_done_path):
raise Exception('File not exists: {}'.format(cache_done_path))
self.cache_done_dir = os.path.dirname(self.cache_done_path)
self.from_server = os.path.join(self.cache_done_dir, 'from.server')
self.cache_sha1 = os.path.join(self.cache_done_dir, 'cache.sha1')
def entry_from_server(self):
return os.path.exists(self.from_server)
def upload_raw(self, github):
sha1 = open(self.cache_sha1, 'r').read()
if sha1 == '':
sys.exit('File with no content: {}'.format(self.cache_sha1))
raw = os.path.join(self.cache_raw, sha1 + '.tar.bz2')
if os.path.exists(raw):
github.upload_raw_file(raw)
# else:
# FIXME (old GitHub API upload): https://travis-ci.org/ingenue/hunter/jobs/347888167
# New Git-based upload: 'from.server' not present for old cache
def touch_from_server(self):
open(self.from_server, 'w')
class Cache:
def __init__(self, cache_dir):
self.cache_meta = os.path.join(cache_dir, 'meta')
self.repo = git.Repo.init(self.cache_meta)
self.entries = self.create_entries(cache_dir)
self.remove_entries_from_server()
def create_entries(self, cache_dir):
print('Searching for CACHE.DONE files in directory:\n {}\n'.format(cache_dir))
entries = []
for x in self.repo.untracked_files:
if x.endswith('CACHE.DONE'):
entries.append(CacheEntry(os.path.join(self.cache_meta, x), cache_dir))
print('Found {} files'.format(len(entries)))
return entries
def remove_entries_from_server(self):
new_entries = []
for i in self.entries:
if not i.entry_from_server():
new_entries.append(i)
self.entries = new_entries
def upload_raw(self, github):
for i in self.entries:
i.upload_raw(github)
def make_commit_message(self):
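        # Record CI metadata (Travis/AppVeyor environment variables and the
        # job URL) in the commit message for traceability.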
message = 'Uploading cache info\n\n'
env_list = []
job_url = ''
if os.getenv('TRAVIS') == 'true':
# * https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables
message += 'Travis:\n'
job_url = 'https://travis-ci.org/{}/jobs/{}'.format(
os.getenv('TRAVIS_REPO_SLUG'),
os.getenv('TRAVIS_JOB_ID')
)
env_list += [
'TRAVIS_BRANCH',
'TRAVIS_BUILD_ID',
'TRAVIS_BUILD_NUMBER',
'TRAVIS_JOB_ID',
'TRAVIS_JOB_NUMBER',
'TRAVIS_OS_NAME',
'TRAVIS_REPO_SLUG'
]
if os.getenv('APPVEYOR') == 'True':
# * http://www.appveyor.com/docs/environment-variables
message += 'AppVeyor:\n'
job_url = 'https://ci.appveyor.com/project/{}/{}/build/{}/job/{}'.format(
os.getenv('APPVEYOR_ACCOUNT_NAME'),
os.getenv('APPVEYOR_PROJECT_SLUG'),
os.getenv('APPVEYOR_BUILD_VERSION'),
os.getenv('APPVEYOR_JOB_ID')
)
env_list += [
'APPVEYOR_ACCOUNT_NAME',
'APPVEYOR_PROJECT_ID',
'APPVEYOR_PROJECT_NAME',
'APPVEYOR_PROJECT_SLUG',
'APPVEYOR_BUILD_ID',
'APPVEYOR_BUILD_NUMBER',
'APPVEYOR_BUILD_VERSION',
'APPVEYOR_JOB_ID',
'APPVEYOR_JOB_NAME',
'APPVEYOR_REPO_BRANCH'
]
# Store some info about build
for env_name in env_list:
env_value = os.getenv(env_name)
if env_value:
message += ' {}: {}\n'.format(env_name, env_value)
if job_url:
message += '\n Job URL: {}\n'.format(job_url)
return message
def try_to_push(self, main_remote, main_remote_url_pull, github):
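        # Rebase onto the latest remote state, then push through a temporarily
        # credential-embedded URL; any rejection returns False so the caller
        # can retry.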
try:
fetch_result = main_remote.pull(
allow_unrelated_histories=True,
strategy='recursive',
strategy_option='ours',
rebase=True,
depth=1
)
for x in fetch_result:
if x.flags & x.REJECTED:
print('Pull rejected')
return False
if x.flags & x.ERROR:
print('Pull error')
return False
except Exception as exc:
print("Pull failed: {}".format(exc))
return False
try:
main_remote.set_url(
'https://{}:{}@github.com/{}/{}'.format(
github.username,
github.password,
github.repo_owner,
github.repo
)
)
push_result = main_remote.push()
main_remote.set_url(main_remote_url_pull)
for x in push_result:
if x.flags & x.ERROR:
print('Push error')
return False
if x.flags & x.REJECTED:
print('Push rejected')
return False
if x.flags & x.REMOTE_FAILURE:
print('Push remote failure')
return False
if x.flags & x.REMOTE_REJECTED:
print('Push remote rejected')
return False
except:
# No exceptions expected, exit to avoid leakage of token
sys.exit('Unexpected exception')
return True
def upload_meta(self, github, cache_dir):
config = self.repo.config_writer()
config.set_value(
"user",
"email",
"{}@users.noreply.github.com".format(github.username)
)
config.set_value("user", "name", github.username)
if sys.platform == "win32":
config.set_value("core", "autocrlf", "input")
config.release()
if self.repo.is_dirty(untracked_files=True):
print('Adding untracked files:')
add_list = []
for x in self.repo.untracked_files:
to_add = False
if x.endswith('toolchain.info'):
to_add = True
elif x.endswith('args.cmake'):
to_add = True
elif x.endswith('types.info'):
to_add = True
elif x.endswith('internal_deps.id'):
to_add = True
elif x.endswith('basic-deps.info'):
to_add = True
elif x.endswith('basic-deps.DONE'):
to_add = True
elif x.endswith('cache.sha1'):
to_add = True
elif x.endswith('deps.info'):
to_add = True
elif x.endswith('CACHE.DONE'):
to_add = True
elif x.endswith('SHA1'):
to_add = True
if to_add:
print(' * {}'.format(x))
add_list.append(x)
sys.stdout.flush()
self.repo.index.add(add_list)
self.repo.index.commit(self.make_commit_message())
main_branch_found = False
for branch in self.repo.branches:
if branch.name == 'master':
main_branch_found = True
if not main_branch_found:
self.repo.git.branch('master')
main_remote_found = False
for remote in self.repo.remotes:
if remote.name == 'origin':
main_remote_found = True
main_remote = remote
main_remote_url_pull = 'https://github.com/{}/{}'.format(
github.repo_owner, github.repo
)
if not main_remote_found:
main_remote = self.repo.create_remote('origin', main_remote_url_pull)
retry_max = 10
fetch_ok = False
for i in range(1, retry_max):
try:
if fetch_ok:
break
print('Fetch remote (attempt #{})'.format(i))
sys.stdout.flush()
main_remote.fetch(depth=1)
fetch_ok = True
except Exception as exc:
print('Exception {}'.format(exc))
if not fetch_ok:
sys.exit('Fetch failed')
self.repo.heads.master.set_tracking_branch(main_remote.refs.master)
success = False
for i in range(1, retry_max):
print("Attempt #{}".format(i))
success = self.try_to_push(main_remote, main_remote_url_pull, github)
if success:
break
sec = sleep_time(i)
print('Retry #{} (of {}) after {} seconds'.format(i, retry_max, sec))
sys.stdout.flush()
time.sleep(sec)
if success:
print("Done")
else:
sys.exit("Can't push")
def touch_from_server(self):
for i in self.entries:
i.touch_from_server()
parser = argparse.ArgumentParser(
description='Script for uploading Hunter cache files to GitHub'
)
parser.add_argument(
'--username',
required=True,
help='Username'
)
parser.add_argument(
'--password',
required=True,
help='Password'
)
parser.add_argument(
'--repo-owner',
required=True,
help='Repository owner'
)
parser.add_argument(
'--repo',
required=True,
help='Repository name'
)
parser.add_argument(
'--cache-dir',
required=True,
help='Hunter cache directory, e.g. /home/user/.hunter/_Base/Cache'
)
args = parser.parse_args()
cache_dir = os.path.normpath(args.cache_dir)
# Some tests don't produce cache for some toolchains:
# * https://travis-ci.org/ingenue/hunter/jobs/185550289
if not os.path.exists(cache_dir):
print("*** WARNING *** Cache directory '{}' not found, skipping...".format(cache_dir))
sys.exit()
if not os.path.isdir(cache_dir):
raise Exception('Not a directory: {}'.format(cache_dir))
if os.path.split(cache_dir)[1] != 'Cache':
    raise Exception('Cache directory path should end with Cache: {}'.format(cache_dir))
cache = Cache(cache_dir)
password = args.password
if password == '' or password is None:
raise Exception('No password provided')
github = Github(
username = args.username,
password = password,
repo_owner = args.repo_owner,
repo = args.repo
)
cache.upload_raw(github)
cache.upload_meta(github, cache_dir)
print('Touch from.server files')
cache.touch_from_server()
| bsd-2-clause |
stefanpl/sublime-config | User/linters/go.py | 2 | 1051 | import os
from lint import Linter
def find_files(root, ext):
root = root.rstrip(os.sep) + os.sep
ret = []
for base, dirs, names in os.walk(root):
for name in names:
if name.endswith(ext):
base = base.replace(root, '', 1)
ret.append(os.path.join(base, name))
return ret
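# Illustrative behaviour of find_files (not part of the original file): because
# the root prefix is stripped above, the returned paths are relative to root,
# e.g. find_files('/path/to/pkg', '.go') -> ['main.go', 'sub/util.go'].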
class Golang(Linter):
language = 'go'
cmd = ('go', 'build', '-gcflags', '-e -N')
regex = r'.+?:(?P<line>\d+): (?P<error>.+)'
def run(self, cmd, code):
code = code.encode('utf8')
if not self.filename:
tools = self.popen(('go', 'tool')).communicate()[0].split('\n')
for compiler in ('6g', '8g'):
if compiler in tools:
return self.tmpfile(('go', 'tool', compiler, '-e', '-o', os.devnull), code, suffix='.go')
else:
path = os.path.split(self.filename)[0]
os.chdir(path)
files = find_files(path, '.go')
answer = self.tmpdir(cmd, files, code)
return answer
| mit |
pigate/mongo-python-driver | test/test_uri_spec.py | 11 | 4682 | # Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the pymongo uri_parser module is up to spec."""
import json
import os
import sys
import warnings
sys.path[0:0] = [""]
from pymongo.uri_parser import parse_uri
from test import unittest
# Location of JSON test specifications.
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.path.join('connection_string', 'test'))
class TestAllScenarios(unittest.TestCase):
pass
def create_test(scenario_def):
def run_scenario(self):
self.assertTrue(scenario_def['tests'], "tests cannot be empty")
for test in scenario_def['tests']:
dsc = test['description']
warned = False
error = False
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
options = parse_uri(test['uri'], warn=True)
except Warning:
warned = True
except Exception:
error = True
self.assertEqual(not error, test['valid'],
"Test failure '%s'" % dsc)
if test.get("warning", False):
self.assertTrue(warned,
"Expected warning for test '%s'"
% (dsc,))
# Redo in the case there were warnings that were not expected.
if warned:
options = parse_uri(test['uri'], warn=True)
# Compare hosts and port.
if test['hosts'] is not None:
self.assertEqual(
len(test['hosts']), len(options['nodelist']),
"Incorrect number of hosts parsed from URI")
for exp, actual in zip(test['hosts'],
options['nodelist']):
self.assertEqual(exp['host'], actual[0],
"Expected host %s but got %s"
% (exp['host'], actual[0]))
if exp['port'] is not None:
self.assertEqual(exp['port'], actual[1],
"Expected port %s but got %s"
% (exp['port'], actual))
# Compare auth options.
auth = test['auth']
if auth is not None:
auth['database'] = auth.pop('db') # db == database
# Special case for PyMongo's collection parsing.
if options.get('collection') is not None:
options['database'] += "." + options['collection']
for elm in auth:
if auth[elm] is not None:
self.assertEqual(auth[elm], options[elm],
"Expected %s but got %s"
% (auth[elm], options[elm]))
# Compare URI options.
if test['options'] is not None:
for opt in test['options']:
if options.get(opt) is not None:
self.assertEqual(
options[opt], test['options'][opt],
"For option %s expected %s but got %s"
% (opt, options[opt],
test['options'][opt]))
return run_scenario
def create_tests():
for dirpath, _, filenames in os.walk(_TEST_PATH):
dirname = os.path.split(dirpath)
dirname = os.path.split(dirname[-2])[-1] + '_' + dirname[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
scenario_def = json.load(scenario_stream)
# Construct test from scenario.
new_test = create_test(scenario_def)
test_name = 'test_%s_%s' % (
dirname, os.path.splitext(filename)[0])
new_test.__name__ = test_name
setattr(TestAllScenarios, new_test.__name__, new_test)
create_tests()
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
patilsangram/erpnext | erpnext/erpnext_integrations/connectors/shopify_connection.py | 8 | 9495 | from __future__ import unicode_literals
import frappe
from frappe import _
import json
from frappe.utils import cstr, cint, nowdate, flt
from erpnext.erpnext_integrations.utils import validate_webhooks_request
from erpnext.selling.doctype.sales_order.sales_order import make_delivery_note, make_sales_invoice
from erpnext.erpnext_integrations.doctype.shopify_settings.sync_product import sync_item_from_shopify
from erpnext.erpnext_integrations.doctype.shopify_settings.sync_customer import create_customer
from erpnext.erpnext_integrations.doctype.shopify_log.shopify_log import make_shopify_log, dump_request_data
@frappe.whitelist(allow_guest=True)
@validate_webhooks_request("Shopify Settings", 'X-Shopify-Hmac-Sha256', secret_key='shared_secret')
def store_request_data(order=None, event=None):
if frappe.request:
order = json.loads(frappe.request.data)
event = frappe.request.headers.get('X-Shopify-Topic')
dump_request_data(order, event)
def sync_sales_order(order, request_id=None):
shopify_settings = frappe.get_doc("Shopify Settings")
frappe.flags.request_id = request_id
if not frappe.db.get_value("Sales Order", filters={"shopify_order_id": cstr(order['id'])}):
try:
validate_customer(order, shopify_settings)
validate_item(order, shopify_settings)
create_order(order, shopify_settings)
except Exception as e:
make_shopify_log(status="Error", message=e.message, exception=False)
else:
make_shopify_log(status="Success")
def prepare_sales_invoice(order, request_id=None):
shopify_settings = frappe.get_doc("Shopify Settings")
frappe.flags.request_id = request_id
try:
sales_order = get_sales_order(cstr(order['id']))
if sales_order:
create_sales_invoice(order, shopify_settings, sales_order)
make_shopify_log(status="Success")
except Exception:
make_shopify_log(status="Error", exception=True)
def prepare_delivery_note(order, request_id=None):
shopify_settings = frappe.get_doc("Shopify Settings")
frappe.flags.request_id = request_id
try:
sales_order = get_sales_order(cstr(order['id']))
if sales_order:
create_delivery_note(order, shopify_settings, sales_order)
make_shopify_log(status="Success")
except Exception:
make_shopify_log(status="Error", exception=True)
def get_sales_order(shopify_order_id):
sales_order = frappe.db.get_value("Sales Order", filters={"shopify_order_id": shopify_order_id})
if sales_order:
so = frappe.get_doc("Sales Order", sales_order)
return so
def validate_customer(order, shopify_settings):
customer_id = order.get("customer", {}).get("id")
if customer_id:
if not frappe.db.get_value("Customer", {"shopify_customer_id": customer_id}, "name"):
create_customer(order.get("customer"), shopify_settings)
def validate_item(order, shopify_settings):
for item in order.get("line_items"):
if item.get("product_id") and not frappe.db.get_value("Item", {"shopify_product_id": item.get("product_id")}, "name"):
sync_item_from_shopify(shopify_settings, item)
def create_order(order, shopify_settings, company=None):
so = create_sales_order(order, shopify_settings, company)
if so:
if order.get("financial_status") == "paid":
create_sales_invoice(order, shopify_settings, so)
if order.get("fulfillments"):
create_delivery_note(order, shopify_settings, so)
def create_sales_order(shopify_order, shopify_settings, company=None):
product_not_exists = []
customer = frappe.db.get_value("Customer", {"shopify_customer_id": shopify_order.get("customer", {}).get("id")}, "name")
so = frappe.db.get_value("Sales Order", {"shopify_order_id": shopify_order.get("id")}, "name")
if not so:
items = get_order_items(shopify_order.get("line_items"), shopify_settings)
if not items:
			message = 'The following items exist in the order, but no relevant record was found in the Product master'
message += "\n" + ", ".join(product_not_exists)
make_shopify_log(status="Error", message=message, exception=True)
return ''
so = frappe.get_doc({
"doctype": "Sales Order",
"naming_series": shopify_settings.sales_order_series or "SO-Shopify-",
"shopify_order_id": shopify_order.get("id"),
"customer": customer or shopify_settings.default_customer,
"delivery_date": nowdate(),
"company": shopify_settings.company,
"selling_price_list": shopify_settings.price_list,
"ignore_pricing_rule": 1,
"items": items,
"taxes": get_order_taxes(shopify_order, shopify_settings),
"apply_discount_on": "Grand Total",
"discount_amount": get_discounted_amount(shopify_order),
})
if company:
so.update({
"company": company,
"status": "Draft"
})
so.flags.ignore_mandatory = True
so.save(ignore_permissions=True)
so.submit()
else:
so = frappe.get_doc("Sales Order", so)
frappe.db.commit()
return so
def create_sales_invoice(shopify_order, shopify_settings, so):
if not frappe.db.get_value("Sales Invoice", {"shopify_order_id": shopify_order.get("id")}, "name")\
and so.docstatus==1 and not so.per_billed and cint(shopify_settings.sync_sales_invoice):
si = make_sales_invoice(so.name, ignore_permissions=True)
si.shopify_order_id = shopify_order.get("id")
si.naming_series = shopify_settings.sales_invoice_series or "SI-Shopify-"
si.flags.ignore_mandatory = True
set_cost_center(si.items, shopify_settings.cost_center)
si.submit()
make_payament_entry_against_sales_invoice(si, shopify_settings)
frappe.db.commit()
def set_cost_center(items, cost_center):
for item in items:
item.cost_center = cost_center
def make_payament_entry_against_sales_invoice(doc, shopify_settings):
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
payemnt_entry = get_payment_entry(doc.doctype, doc.name, bank_account=shopify_settings.cash_bank_account)
payemnt_entry.flags.ignore_mandatory = True
payemnt_entry.reference_no = doc.name
payemnt_entry.reference_date = nowdate()
payemnt_entry.submit()
def create_delivery_note(shopify_order, shopify_settings, so):
if not cint(shopify_settings.sync_delivery_note):
return
for fulfillment in shopify_order.get("fulfillments"):
if not frappe.db.get_value("Delivery Note", {"shopify_fulfillment_id": fulfillment.get("id")}, "name")\
and so.docstatus==1:
dn = make_delivery_note(so.name)
dn.shopify_order_id = fulfillment.get("order_id")
dn.shopify_fulfillment_id = fulfillment.get("id")
dn.naming_series = shopify_settings.delivery_note_series or "DN-Shopify-"
dn.items = get_fulfillment_items(dn.items, fulfillment.get("line_items"), shopify_settings)
dn.flags.ignore_mandatory = True
dn.save()
frappe.db.commit()
def get_fulfillment_items(dn_items, fulfillment_items, shopify_settings):
return [dn_item.update({"qty": item.get("quantity")}) for item in fulfillment_items for dn_item in dn_items\
if get_item_code(item) == dn_item.item_code]
def get_discounted_amount(order):
discounted_amount = 0.0
for discount in order.get("discount_codes"):
discounted_amount += flt(discount.get("amount"))
return discounted_amount
def get_order_items(order_items, shopify_settings):
items = []
all_product_exists = True
product_not_exists = []
for shopify_item in order_items:
if not shopify_item.get('product_exists'):
all_product_exists = False
product_not_exists.append({'title':shopify_item.get('title'),
'shopify_order_id': shopify_item.get('id')})
continue
if all_product_exists:
item_code = get_item_code(shopify_item)
items.append({
"item_code": item_code,
"item_name": shopify_item.get("name"),
"rate": shopify_item.get("price"),
"delivery_date": nowdate(),
"qty": shopify_item.get("quantity"),
"stock_uom": shopify_item.get("sku"),
"warehouse": shopify_settings.warehouse
})
else:
items = []
return items
def get_item_code(shopify_item):
item_code = frappe.db.get_value("Item", {"shopify_variant_id": shopify_item.get("variant_id")}, "item_code")
if not item_code:
item_code = frappe.db.get_value("Item", {"shopify_product_id": shopify_item.get("product_id")}, "item_code")
if not item_code:
item_code = frappe.db.get_value("Item", {"item_name": shopify_item.get("title")}, "item_code")
return item_code
def get_order_taxes(shopify_order, shopify_settings):
taxes = []
for tax in shopify_order.get("tax_lines"):
taxes.append({
"charge_type": _("On Net Total"),
"account_head": get_tax_account_head(tax),
"description": "{0} - {1}%".format(tax.get("title"), tax.get("rate") * 100.0),
"rate": tax.get("rate") * 100.00,
"included_in_print_rate": 1 if shopify_order.get("taxes_included") else 0,
"cost_center": shopify_settings.cost_center
})
taxes = update_taxes_with_shipping_lines(taxes, shopify_order.get("shipping_lines"), shopify_settings)
return taxes
def update_taxes_with_shipping_lines(taxes, shipping_lines, shopify_settings):
for shipping_charge in shipping_lines:
taxes.append({
"charge_type": _("Actual"),
"account_head": get_tax_account_head(shipping_charge),
"description": shipping_charge["title"],
"tax_amount": shipping_charge["price"],
"cost_center": shopify_settings.cost_center
})
return taxes
def get_tax_account_head(tax):
tax_title = tax.get("title").encode("utf-8")
tax_account = frappe.db.get_value("Shopify Tax Account", \
{"parent": "Shopify Settings", "shopify_tax": tax_title}, "tax_account")
if not tax_account:
frappe.throw("Tax Account not specified for Shopify Tax {0}".format(tax.get("title")))
return tax_account
| gpl-3.0 |
jldbc/pybaseball | pybaseball/standings.py | 1 | 3820 | from typing import List, Optional
import pandas as pd
import requests
from bs4 import BeautifulSoup, Comment, PageElement, ResultSet
from . import cache
from .utils import most_recent_season
def get_soup(year: int) -> BeautifulSoup:
url = f'http://www.baseball-reference.com/leagues/MLB/{year}-standings.shtml'
s = requests.get(url).content
return BeautifulSoup(s, "lxml")
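# For example, get_soup(2019) downloads and parses
# http://www.baseball-reference.com/leagues/MLB/2019-standings.shtml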
def get_tables(soup: BeautifulSoup, season: int) -> List[pd.DataFrame]:
datasets = []
if season >= 1969:
tables: List[PageElement] = soup.find_all('table')
if season == 1981:
# For some reason BRef has 1981 broken down by halves and overall
# https://www.baseball-reference.com/leagues/MLB/1981-standings.shtml
tables = [x for x in tables if 'overall' in x.get('id', '')]
for table in tables:
data = []
headings: List[PageElement] = [th.get_text() for th in table.find("tr").find_all("th")]
data.append(headings)
table_body: PageElement = table.find('tbody')
rows: List[PageElement] = table_body.find_all('tr')
for row in rows:
cols: List[PageElement] = row.find_all('td')
cols_text: List[str] = [ele.text.strip() for ele in cols]
cols_text.insert(0, row.find_all('a')[0].text.strip()) # team name
data.append([ele for ele in cols_text if ele])
datasets.append(data)
else:
data = []
table = soup.find('table')
headings = [th.get_text() for th in table.find("tr").find_all("th")]
headings[0] = "Name"
if season >= 1930:
for _ in range(15):
headings.pop()
elif season >= 1876:
for _ in range(14):
headings.pop()
else:
for _ in range(16):
headings.pop()
data.append(headings)
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
if row.find_all('a') == []:
continue
cols = row.find_all('td')
if season >= 1930:
for _ in range(15):
cols.pop()
elif season >= 1876:
for _ in range(14):
cols.pop()
else:
for _ in range(16):
cols.pop()
cols = [ele.text.strip() for ele in cols]
cols.insert(0,row.find_all('a')[0].text.strip()) # team name
data.append([ele for ele in cols if ele])
datasets.append(data)
#convert list-of-lists to dataframes
for idx in range(len(datasets)):
datasets[idx] = pd.DataFrame(datasets[idx])
return datasets #returns a list of dataframes
@cache.df_cache()
def standings(season: Optional[int] = None) -> List[pd.DataFrame]:
# get most recent standings if date not specified
if season is None:
season = most_recent_season()
if season < 1876:
raise ValueError(
"This query currently only returns standings until the 1876 season. "
"Try looking at years from 1876 to present."
)
# retrieve html from baseball reference
soup = get_soup(season)
if season >= 1969:
raw_tables = get_tables(soup, season)
else:
        t = [x for x in soup.find_all(string=lambda text: isinstance(text, Comment)) if 'expanded_standings_overall' in x]
code = BeautifulSoup(t[0], "lxml")
raw_tables = get_tables(code, season)
tables = [pd.DataFrame(table) for table in raw_tables]
for idx in range(len(tables)):
tables[idx] = tables[idx].rename(columns=tables[idx].iloc[0])
tables[idx] = tables[idx].reindex(tables[idx].index.drop(0))
return tables
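# Minimal usage sketch (illustrative only, not part of the original module):
#   tables = standings(2019)      # a list of DataFrames, one per standings table
#   print(tables[0].to_string())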
| mit |
rmerlino/dleyna-server | test/dbus/monitor_changed.py | 6 | 1511 | #!/usr/bin/env python
# monitor_last_change
#
# Copyright (C) 2012 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Regis Merlino <[email protected]>
# Mark Ryan <[email protected]>
#
import gobject
import dbus
import dbus.mainloop.glib
import json
def print_properties(props):
print json.dumps(props, indent=4, sort_keys=True)
def changed(objects, path):
print "Changed signal from [%s]" % path
print "Objects:"
print_properties(objects)
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SessionBus()
bus.add_signal_receiver(changed,
bus_name="com.intel.dleyna-server",
signal_name = "Changed",
path_keyword="path")
mainloop = gobject.MainLoop()
mainloop.run()
| lgpl-2.1 |
jbbskinny/sympy | sympy/polys/tests/test_partfrac.py | 48 | 5244 | """Tests for algorithms for partial fraction decomposition of rational
functions. """
from sympy.polys.partfrac import (
apart_undetermined_coeffs,
apart,
apart_list, assemble_partfrac_list
)
from sympy import (S, Poly, E, pi, I, Matrix, Eq, RootSum, Lambda,
Symbol, Dummy, factor, together, sqrt, Expr)
from sympy.utilities.pytest import raises
from sympy.abc import x, y, a, b, c
def test_apart():
assert apart(1) == 1
assert apart(1, x) == 1
f, g = (x**2 + 1)/(x + 1), 2/(x + 1) + x - 1
assert apart(f, full=False) == g
assert apart(f, full=True) == g
f, g = 1/(x + 2)/(x + 1), 1/(1 + x) - 1/(2 + x)
assert apart(f, full=False) == g
assert apart(f, full=True) == g
f, g = 1/(x + 1)/(x + 5), -1/(5 + x)/4 + 1/(1 + x)/4
assert apart(f, full=False) == g
assert apart(f, full=True) == g
assert apart((E*x + 2)/(x - pi)*(x - 1), x) == \
2 - E + E*pi + E*x + (E*pi + 2)*(pi - 1)/(x - pi)
assert apart(Eq((x**2 + 1)/(x + 1), x), x) == Eq(x - 1 + 2/(x + 1), x)
raises(NotImplementedError, lambda: apart(1/(x + 1)/(y + 2)))
def test_apart_matrix():
M = Matrix(2, 2, lambda i, j: 1/(x + i + 1)/(x + j))
assert apart(M) == Matrix([
[1/x - 1/(x + 1), (x + 1)**(-2)],
[1/(2*x) - (S(1)/2)/(x + 2), 1/(x + 1) - 1/(x + 2)],
])
def test_apart_symbolic():
f = a*x**4 + (2*b + 2*a*c)*x**3 + (4*b*c - a**2 + a*c**2)*x**2 + \
(-2*a*b + 2*b*c**2)*x - b**2
g = a**2*x**4 + (2*a*b + 2*c*a**2)*x**3 + (4*a*b*c + b**2 +
a**2*c**2)*x**2 + (2*c*b**2 + 2*a*b*c**2)*x + b**2*c**2
assert apart(f/g, x) == 1/a - 1/(x + c)**2 - b**2/(a*(a*x + b)**2)
assert apart(1/((x + a)*(x + b)*(x + c)), x) == \
1/((a - c)*(b - c)*(c + x)) - 1/((a - b)*(b - c)*(b + x)) + \
1/((a - b)*(a - c)*(a + x))
def test_apart_extension():
f = 2/(x**2 + 1)
g = I/(x + I) - I/(x - I)
assert apart(f, extension=I) == g
assert apart(f, gaussian=True) == g
f = x/((x - 2)*(x + I))
assert factor(together(apart(f))) == f
def test_apart_full():
f = 1/(x**2 + 1)
assert apart(f, full=False) == f
assert apart(f, full=True) == \
-RootSum(x**2 + 1, Lambda(a, a/(x - a)), auto=False)/2
f = 1/(x**3 + x + 1)
assert apart(f, full=False) == f
assert apart(f, full=True) == \
RootSum(x**3 + x + 1,
Lambda(a, (6*a**2/31 - 9*a/31 + S(4)/31)/(x - a)), auto=False)
f = 1/(x**5 + 1)
assert apart(f, full=False) == \
(-S(1)/5)*((x**3 - 2*x**2 + 3*x - 4)/(x**4 - x**3 + x**2 -
x + 1)) + (S(1)/5)/(x + 1)
assert apart(f, full=True) == \
-RootSum(x**4 - x**3 + x**2 - x + 1,
Lambda(a, a/(x - a)), auto=False)/5 + (S(1)/5)/(x + 1)
def test_apart_undetermined_coeffs():
p = Poly(2*x - 3)
q = Poly(x**9 - x**8 - x**6 + x**5 - 2*x**2 + 3*x - 1)
r = (-x**7 - x**6 - x**5 + 4)/(x**8 - x**5 - 2*x + 1) + 1/(x - 1)
assert apart_undetermined_coeffs(p, q) == r
p = Poly(1, x, domain='ZZ[a,b]')
q = Poly((x + a)*(x + b), x, domain='ZZ[a,b]')
r = 1/((a - b)*(b + x)) - 1/((a - b)*(a + x))
assert apart_undetermined_coeffs(p, q) == r
def test_apart_list():
from sympy.utilities.iterables import numbered_symbols
w0, w1, w2 = Symbol("w0"), Symbol("w1"), Symbol("w2")
_a = Dummy("a")
f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
assert apart_list(f, x, dummies=numbered_symbols("w")) == (-1,
Poly(S(2)/3, x, domain='QQ'),
[(Poly(w0 - 2, w0, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])
assert apart_list(2/(x**2-2), x, dummies=numbered_symbols("w")) == (1,
Poly(0, x, domain='ZZ'),
[(Poly(w0**2 - 2, w0, domain='ZZ'),
Lambda(_a, _a/2),
Lambda(_a, -_a + x), 1)])
f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
assert apart_list(f, x, dummies=numbered_symbols("w")) == (1,
Poly(0, x, domain='ZZ'),
[(Poly(w0 - 2, w0, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
(Poly(w1**2 - 1, w1, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
(Poly(w2 + 1, w2, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
def test_assemble_partfrac_list():
f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
pfd = apart_list(f)
assert assemble_partfrac_list(pfd) == -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)
a = Dummy("a")
pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])
assert assemble_partfrac_list(pfd) == -1/(sqrt(2)*(x + sqrt(2))) + 1/(sqrt(2)*(x - sqrt(2)))
def test_noncommutative_pseudomultivariate():
class foo(Expr):
is_commutative=False
e = x/(x + x*y)
c = 1/(1 + y)
assert apart(e + foo(e)) == c + foo(c)
assert apart(e*foo(e)) == c*foo(c)
def test_issue_5798():
assert apart(
2*x/(x**2 + 1) - (x - 1)/(2*(x**2 + 1)) + 1/(2*(x + 1)) - 2/x) == \
(3*x + 1)/(x**2 + 1)/2 + 1/(x + 1)/2 - 2/x
| bsd-3-clause |
schnittstabil/findd | tests/utils/test_path.py | 1 | 2655 | from os import pathsep
from os.path import basename
from os.path import dirname
from os.path import join
from mock import patch
from mock import Mock
from .. import TestCase
import findd.utils.path as sut
class Parents(TestCase):
def test_should_work_with_empty_paths(self):
self.assertEqual(list(sut.parents('')), [''])
def test_should_work_with_posix_paths(self):
ps = sut.parents('a/b/c/d')
prev = next(ps)
self.assertEqual(prev, 'a/b/c/d')
if pathsep == '/':
prev = next(ps)
self.assertEqual(prev, 'a/b/c')
for p in ps:
self.assertEqual(p, dirname(prev))
prev = p
def test_should_work_with_nt_paths(self):
ps = sut.parents('C:\\a\\b\\c\\d')
prev = next(ps)
self.assertEqual(prev, 'C:\\a\\b\\c\\d')
if pathsep == '\\':
prev = next(ps)
self.assertEqual(prev, 'C:\\a\\b\\c')
for p in ps:
self.assertEqual(p, dirname(prev))
prev = p
class FilesOfDir(TestCase):
@patch('findd.utils.path.walk')
def test_should_work_with_empty_dirs(self, walk):
walk.return_value = [('virtual', [], [])]
self.assertEqual(list(sut.files_of_dir('virtual')), [])
self.assertEqual(len(walk.mock_calls), 1)
@patch('findd.utils.path.walk')
@patch('findd.utils.path.stat')
def test_should_work_with_non_empty_dirs(self, stat, walk):
walk.return_value = [
('virtual', ['dir'], []),
('virtual/dir', [], ['123.file'])
]
path123 = 'virtual/dir/123.file'
files = list(sut.files_of_dir('virtual'))
self.assertEqual(len(stat.mock_calls), 1)
self.assertEqual(len(walk.mock_calls), 1)
stat.assert_called_once_with(path123)
self.assertEqual(list(map(lambda e: e.path, files)), [path123])
@patch('findd.utils.path.walk')
@patch('findd.utils.path.stat')
def test_should_respect_is_excluded(self, stat, walk):
def exclude_dotfindd(path):
return basename(path) == '.findd'
walk.return_value = [
('virtual', ['.findd'], ['root.file']),
]
is_excluded = Mock(side_effect=exclude_dotfindd)
files = list(sut.files_of_dir('virtual', is_excluded=is_excluded))
self.assertEqual(len(walk.mock_calls), 1)
self.assertEqual(len(is_excluded.mock_calls), 2)
self.assertEqual(len(stat.mock_calls), 1)
self.assertEqual(walk.return_value[0][1], [])
paths = list(map(lambda e: e.path, files))
self.assertEqual(paths, [join('virtual', 'root.file')])
| mit |
lalanne/SmallServers | py/common/ResponseBuilder.py | 1 | 1947 |
def build_base_message(msisdn, result):
message = '<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>' + \
'<msg>' + \
'<header action=\"1\" id=\"1111\"/>' + \
'<resp>' + \
'<op>rslt_comp_promo</op>' + \
'<msisdn>' + str(msisdn) + '</msisdn>' + \
'<result>' + str(result) + '</result>' + \
'<strresult>OK</strresult>' + \
'</resp>' + \
'</msg>'
return message
class Response:
def __init__(self, msisdn):
self.msisdn = int(msisdn)
self.RSLT_EXITO = 0
self.RSLT_ERR_PROMO_INEXISTENTE = 4
self.RSLT_ERR_SIN_SALDO_PP = 14
self.RSLT_ERR_TIPO_CLIENTE_DST = 18
def build(self):
# print '[build] msisdn[' + str(self.msisdn) + ']'
response = 'no response'
if self.msisdn == 56999694443:
response = build_base_message(self.msisdn,
self.RSLT_ERR_PROMO_INEXISTENTE)
elif self.msisdn == 56999694444:
response = build_base_message(self.msisdn,
self.RSLT_EXITO)
elif self.msisdn >= 56999694448 and self.msisdn <= 56999694459:
response = build_base_message(self.msisdn,
self.RSLT_EXITO)
else:
epilogue = str(self.msisdn)[-2:]
print 'epilogue[' + epilogue + ']'
if epilogue == '14':
response = build_base_message(self.msisdn,
self.RSLT_ERR_SIN_SALDO_PP)
elif epilogue == '10':
response = build_base_message(self.msisdn,
self.RSLT_ERR_TIPO_CLIENTE_DST)
elif epilogue == '25':
response = 'no response'
else:
print '[ERROR] not recognized msisdn[' + str(self.msisdn) + ']'
return response
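# Illustrative behaviour of the stub above, derived from the branches in build():
#   Response('56999694444').build()  # XML reply with <result>0</result> (RSLT_EXITO)
#   Response('56999694425').build()  # 'no response', because the epilogue is '25'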
| gpl-3.0 |
sam-tsai/django | tests/update_only_fields/tests.py | 296 | 9780 | from __future__ import unicode_literals
from django.db.models.signals import post_save, pre_save
from django.test import TestCase
from .models import Account, Employee, Person, Profile, ProxyEmployee
class UpdateOnlyFieldsTests(TestCase):
def test_update_fields_basic(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s.gender = 'M'
s.name = 'Ian'
s.save(update_fields=['name'])
s = Person.objects.get(pk=s.pk)
self.assertEqual(s.gender, 'F')
self.assertEqual(s.name, 'Ian')
def test_update_fields_deferred(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_1(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_2(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(2):
s1.save(update_fields=['pid'])
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Sara")
self.assertEqual(s2.gender, "F")
def test_update_fields_only_repeated(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.gender = 'M'
with self.assertNumQueries(1):
s1.save()
# Test that the deferred class does not remember that gender was
# set, instead the instance should remember this.
s1 = Person.objects.only('name').get(pk=s.pk)
with self.assertNumQueries(1):
s1.save()
def test_update_fields_inheritance_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('name').get(pk=e1.pk)
e1.name = 'Linda'
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).name,
'Linda')
def test_update_fields_fk_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile').get(pk=e1.pk)
e1.profile = profile_receptionist
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
e1.profile_id = profile_boss.pk
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
def test_select_related_only_interaction(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
profile_boss.name = 'Clerk'
profile_boss.salary = 1000
profile_boss.save()
# The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
# overwritten.
with self.assertNumQueries(1):
e1.profile.save()
reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
self.assertEqual(reloaded_profile.name, profile_boss.name)
self.assertEqual(reloaded_profile.salary, 3000)
def test_update_fields_m2m(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
a1 = Account.objects.create(num=1)
a2 = Account.objects.create(num=2)
e1.accounts = [a1, a2]
with self.assertRaises(ValueError):
e1.save(update_fields=['accounts'])
def test_update_fields_inheritance(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
with self.assertNumQueries(1):
e3.profile = profile_boss
e3.save(update_fields=['profile_id'])
e4 = Employee.objects.get(pk=e3.pk)
self.assertEqual(e4.profile, profile_boss)
self.assertEqual(e4.profile_id, profile_boss.pk)
def test_update_fields_inheritance_with_proxy_model(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = ProxyEmployee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
def test_update_fields_signals(self):
p = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
p.save(update_fields=['name'])
self.assertEqual(len(pre_save_data), 1)
self.assertEqual(len(pre_save_data[0]), 1)
self.assertIn('name', pre_save_data[0])
self.assertEqual(len(post_save_data), 1)
self.assertEqual(len(post_save_data[0]), 1)
self.assertIn('name', post_save_data[0])
pre_save.disconnect(pre_save_receiver)
post_save.disconnect(post_save_receiver)
def test_update_fields_incorrect_params(self):
s = Person.objects.create(name='Sara', gender='F')
with self.assertRaises(ValueError):
s.save(update_fields=['first_name'])
with self.assertRaises(ValueError):
s.save(update_fields="name")
def test_empty_update_fields(self):
s = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
# Save is skipped.
with self.assertNumQueries(0):
s.save(update_fields=[])
# Signals were skipped, too...
self.assertEqual(len(pre_save_data), 0)
self.assertEqual(len(post_save_data), 0)
pre_save.disconnect(pre_save_receiver)
post_save.disconnect(post_save_receiver)
def test_num_queries_inheritance(self):
s = Employee.objects.create(name='Sara', gender='F')
s.employee_num = 1
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['employee_num'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.employee_num, 1)
self.assertEqual(s.name, 'Sara')
s.employee_num = 2
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['name'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.name, 'Emily')
self.assertEqual(s.employee_num, 1)
# A little sanity check that we actually did updates...
self.assertEqual(Employee.objects.count(), 1)
self.assertEqual(Person.objects.count(), 1)
with self.assertNumQueries(2):
s.save(update_fields=['name', 'employee_num'])
| bsd-3-clause |
susansalkeld/discsongs | discsongs/lib/python2.7/site-packages/setuptools/tests/test_svn.py | 300 | 7806 | # -*- coding: utf-8 -*-
"""svn tests"""
import io
import os
import subprocess
import sys
import unittest
from setuptools.tests import environment
from setuptools.compat import unicode, unichr
from setuptools import svn_utils
from setuptools.tests.py26compat import skipIf
def _do_svn_check():
try:
subprocess.check_call(["svn", "--version"],
shell=(sys.platform == 'win32'))
return True
except (OSError, subprocess.CalledProcessError):
return False
_svn_check = _do_svn_check()
class TestSvnVersion(unittest.TestCase):
def test_no_svn_found(self):
path_variable = None
for env in os.environ:
if env.lower() == 'path':
path_variable = env
if path_variable is None:
try:
self.skipTest('Cannot figure out how to modify path')
except AttributeError: # PY26 doesn't have this
return
old_path = os.environ[path_variable]
os.environ[path_variable] = ''
try:
version = svn_utils.SvnInfo.get_svn_version()
self.assertEqual(version, '')
finally:
os.environ[path_variable] = old_path
    @skipIf(not _svn_check, "No SVN to test, in the first place")
def test_svn_should_exist(self):
version = svn_utils.SvnInfo.get_svn_version()
self.assertNotEqual(version, '')
def _read_utf8_file(path):
fileobj = None
try:
fileobj = io.open(path, 'r', encoding='utf-8')
data = fileobj.read()
return data
finally:
if fileobj:
fileobj.close()
class ParserInfoXML(unittest.TestCase):
def parse_tester(self, svn_name, ext_spaces):
path = os.path.join('setuptools', 'tests',
'svn_data', svn_name + '_info.xml')
#Remember these are pre-generated to test XML parsing
# so these paths might not valid on your system
example_base = "%s_example" % svn_name
data = _read_utf8_file(path)
expected = set([
("\\".join((example_base, 'a file')), 'file'),
("\\".join((example_base, 'folder')), 'dir'),
("\\".join((example_base, 'folder', 'lalala.txt')), 'file'),
("\\".join((example_base, 'folder', 'quest.txt')), 'file'),
])
self.assertEqual(set(x for x in svn_utils.parse_dir_entries(data)),
expected)
def test_svn13(self):
self.parse_tester('svn13', False)
def test_svn14(self):
self.parse_tester('svn14', False)
def test_svn15(self):
self.parse_tester('svn15', False)
def test_svn16(self):
self.parse_tester('svn16', True)
def test_svn17(self):
self.parse_tester('svn17', True)
def test_svn18(self):
self.parse_tester('svn18', True)
class ParserExternalXML(unittest.TestCase):
def parse_tester(self, svn_name, ext_spaces):
path = os.path.join('setuptools', 'tests',
'svn_data', svn_name + '_ext_list.xml')
example_base = svn_name + '_example'
data = _read_utf8_file(path)
if ext_spaces:
folder2 = 'third party2'
folder3 = 'third party3'
else:
folder2 = 'third_party2'
folder3 = 'third_party3'
expected = set([
os.sep.join((example_base, folder2)),
os.sep.join((example_base, folder3)),
# folder is third_party大介
os.sep.join((example_base,
unicode('third_party') +
unichr(0x5927) + unichr(0x4ecb))),
os.sep.join((example_base, 'folder', folder2)),
os.sep.join((example_base, 'folder', folder3)),
os.sep.join((example_base, 'folder',
unicode('third_party') +
unichr(0x5927) + unichr(0x4ecb))),
])
expected = set(os.path.normpath(x) for x in expected)
dir_base = os.sep.join(('C:', 'development', 'svn_example'))
self.assertEqual(set(x for x
in svn_utils.parse_externals_xml(data, dir_base)), expected)
def test_svn15(self):
self.parse_tester('svn15', False)
def test_svn16(self):
self.parse_tester('svn16', True)
def test_svn17(self):
self.parse_tester('svn17', True)
def test_svn18(self):
self.parse_tester('svn18', True)
class ParseExternal(unittest.TestCase):
def parse_tester(self, svn_name, ext_spaces):
path = os.path.join('setuptools', 'tests',
'svn_data', svn_name + '_ext_list.txt')
data = _read_utf8_file(path)
if ext_spaces:
expected = set(['third party2', 'third party3',
'third party3b', 'third_party'])
else:
expected = set(['third_party2', 'third_party3', 'third_party'])
self.assertEqual(set(x for x in svn_utils.parse_external_prop(data)),
expected)
def test_svn13(self):
self.parse_tester('svn13', False)
def test_svn14(self):
self.parse_tester('svn14', False)
def test_svn15(self):
self.parse_tester('svn15', False)
def test_svn16(self):
self.parse_tester('svn16', True)
def test_svn17(self):
self.parse_tester('svn17', True)
def test_svn18(self):
self.parse_tester('svn18', True)
class TestSvn(environment.ZippedEnvironment):
def setUp(self):
version = svn_utils.SvnInfo.get_svn_version()
if not version: # empty or null
self.dataname = None
self.datafile = None
return
self.base_version = tuple([int(x) for x in version.split('.')[:2]])
if self.base_version < (1,3):
raise ValueError('Insufficient SVN Version %s' % version)
elif self.base_version >= (1,9):
#trying the latest version
self.base_version = (1,8)
self.dataname = "svn%i%i_example" % self.base_version
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', self.dataname + ".zip")
super(TestSvn, self).setUp()
    @skipIf(not _svn_check, "No SVN to test, in the first place")
def test_revision(self):
rev = svn_utils.SvnInfo.load('.').get_revision()
self.assertEqual(rev, 6)
    @skipIf(not _svn_check, "No SVN to test, in the first place")
def test_entries(self):
expected = set([
(os.path.join('a file'), 'file'),
(os.path.join('folder'), 'dir'),
(os.path.join('folder', 'lalala.txt'), 'file'),
(os.path.join('folder', 'quest.txt'), 'file'),
#The example will have a deleted file (or should)
#but shouldn't return it
])
info = svn_utils.SvnInfo.load('.')
self.assertEqual(set(x for x in info.entries), expected)
    @skipIf(not _svn_check, "No SVN to test, in the first place")
def test_externals(self):
if self.base_version >= (1,6):
folder2 = 'third party2'
folder3 = 'third party3'
else:
folder2 = 'third_party2'
folder3 = 'third_party3'
expected = set([
os.path.join(folder2),
os.path.join(folder3),
os.path.join('third_party'),
os.path.join('folder', folder2),
os.path.join('folder', folder3),
os.path.join('folder', 'third_party'),
])
info = svn_utils.SvnInfo.load('.')
self.assertEqual(set([x for x in info.externals]), expected)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| mit |
sgerhart/ansible | test/units/modules/network/nso/nso_module.py | 28 | 3992 | # Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import os
import json
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path not in fixture_data:
with open(path) as f:
data = json.load(f)
fixture_data[path] = data
return fixture_data[path]
class MockResponse(object):
def __init__(self, method, params, code, body, headers=None):
if headers is None:
headers = {}
self.method = method
self.params = params
self.code = code
self.body = body
self.headers = dict(headers)
def read(self):
return self.body
def mock_call(calls, url, timeout, data=None, headers=None, method=None):
if len(calls) == 0:
raise ValueError('no call mock for method {0}({1})'.format(
url, data))
result = calls[0]
del calls[0]
request = json.loads(data)
if result.method != request['method']:
raise ValueError('expected method {0}({1}), got {2}({3})'.format(
result.method, result.params,
request['method'], request['params']))
for key, value in result.params.items():
if key not in request['params']:
raise ValueError('{0} not in parameters'.format(key))
if value != request['params'][key]:
raise ValueError('expected {0} to be {1}, got {2}'.format(
key, value, request['params'][key]))
return result
class AnsibleExitJson(Exception):
pass
class AnsibleFailJson(Exception):
pass
class TestNsoModule(unittest.TestCase):
def execute_module(self, failed=False, changed=False, **kwargs):
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
for key, value in kwargs.items():
if key not in result:
self.fail("{0} not in result {1}".format(key, result))
self.assertEqual(value, result[key])
return result
def failed(self):
def fail_json(*args, **kwargs):
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
with patch.object(basic.AnsibleModule, 'fail_json', fail_json):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
def exit_json(*args, **kwargs):
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
with patch.object(basic.AnsibleModule, 'exit_json', exit_json):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
| mit |
theheros/kbengine | kbe/res/scripts/common/Lib/multiprocessing/process.py | 2 | 10113 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
    The class is analogous to `threading.Thread`
'''
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = next(_current_process._counter)
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
_dangling.add(self)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, str), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
if sys.stdin is not None:
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
old_process = _current_process
_current_process = self
try:
util._finalizer_registry.clear()
util._run_after_forkers()
finally:
# delay finalization of the old process object until after
# _run_after_forkers() is executed
del old_process
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit as e:
if not e.args:
exitcode = 1
elif type(e.args[0]) is int:
exitcode = e.args[0]
else:
sys.stderr.write(e.args[0] + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
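# Typical usage of the Process class above (illustrative; it mirrors the
# standard multiprocessing API):
#
#   def worker(name):
#       pass  # do some work in the child process
#
#   p = Process(target=worker, args=('job-1',))
#   p.start()
#   p.join()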
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
# For debug and leak testing
_dangling = WeakSet()
| lgpl-3.0 |
Carrotsmile/CS428 | steerstats/tools/deap/benchmarks/binary.py | 19 | 4840 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import math
def bin2float(min_, max_, nbits):
"""Convert a binary array into an array of float where each
float is composed of *nbits* and is between *min_* and *max_*
and return the result of the decorated function.
.. note::
This decorator requires the first argument of
the evaluation function to be named *individual*.
"""
def wrap(function):
def wrapped_function(individual, *args, **kargs):
nelem = len(individual)/nbits
decoded = [0] * nelem
for i in xrange(nelem):
gene = int("".join(map(str, individual[i*nbits:i*nbits+nbits])), 2)
div = 2**nbits - 1
temp = float(gene)/float(div)
decoded[i] = min_ + (temp * (max_ - min_))
return function(decoded, *args, **kargs)
return wrapped_function
return wrap
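# Minimal sketch of how the decorator above is meant to be used (assumed, not
# part of the original file):
#
#   @bin2float(min_=-5.0, max_=5.0, nbits=16)
#   def evaluate(individual):
#       # here ``individual`` is already a list of floats in [-5.0, 5.0],
#       # one per 16-bit slice of the original bitstring
#       return sum(x ** 2 for x in individual),
#
# The registered evaluation function still receives the raw binary individual;
# the wrapper decodes it before calling the body above.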
def trap(individual):
u = sum(individual)
k = len(individual)
if u == k:
return k
else:
return k - 1 - u
def inv_trap(individual):
u = sum(individual)
k = len(individual)
if u == 0:
return k
else:
return u - 1
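# Worked values for the two building blocks above (illustrative sanity check):
#   trap([1, 1, 1, 1])     == 4   # u == k, the isolated optimum
#   trap([0, 1, 0, 0])     == 2   # k - 1 - u = 4 - 1 - 1
#   inv_trap([0, 0, 0, 0]) == 4   # u == 0, the inverted optimum
#   inv_trap([1, 0, 0, 0]) == 0   # u - 1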
def chuang_f1(individual):
"""Binary deceptive function from : Multivariate Multi-Model Approach for
Globally Multimodal Problems by Chung-Yao Chuang and Wen-Lian Hsu.
The function takes individual of 40+1 dimensions and has two global optima
in [1,1,...,1] and [0,0,...,0].
"""
total = 0
if individual[-1] == 0:
for i in xrange(0,len(individual)-1,4):
total += inv_trap(individual[i:i+4])
else:
for i in xrange(0,len(individual)-1,4):
total += trap(individual[i:i+4])
return total,
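# Illustrative check: an all-zero individual of length 41 takes the first
# branch and scores 10 * inv_trap([0, 0, 0, 0]) = 40, one of the two global
# optima mentioned in the docstring.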
def chuang_f2(individual):
"""Binary deceptive function from : Multivariate Multi-Model Approach for
Globally Multimodal Problems by Chung-Yao Chuang and Wen-Lian Hsu.
The function takes individual of 40+1 dimensions and has four global optima
in [1,1,...,0,0], [0,0,...,1,1], [1,1,...,1] and [0,0,...,0].
"""
total = 0
if individual[-2] == 0 and individual[-1] == 0:
for i in xrange(0,len(individual)-2,8):
total += inv_trap(individual[i:i+4]) + inv_trap(individual[i+4:i+8])
elif individual[-2] == 0 and individual[-1] == 1:
for i in xrange(0,len(individual)-2,8):
total += inv_trap(individual[i:i+4]) + trap(individual[i+4:i+8])
elif individual[-2] == 1 and individual[-1] == 0:
for i in xrange(0,len(individual)-2,8):
total += trap(individual[i:i+4]) + inv_trap(individual[i+4:i+8])
else:
for i in xrange(0,len(individual)-2,8):
total += trap(individual[i:i+4]) + trap(individual[i+4:i+8])
return total,
def chuang_f3(individual):
"""Binary deceptive function from : Multivariate Multi-Model Approach for
Globally Multimodal Problems by Chung-Yao Chuang and Wen-Lian Hsu.
The function takes individual of 40+1 dimensions and has two global optima
in [1,1,...,1] and [0,0,...,0].
"""
total = 0
if individual[-1] == 0:
for i in xrange(0,len(individual)-1,4):
total += inv_trap(individual[i:i+4])
else:
for i in xrange(2,len(individual)-3,4):
total += inv_trap(individual[i:i+4])
total += trap(individual[-2:]+individual[:2])
return total,
# Royal Road Functions
def royal_road1(individual, order):
"""Royal Road Function R1 as presented by Melanie Mitchell in :
"An introduction to Genetic Algorithms".
"""
nelem = len(individual) / order
max_value = int(2**order - 1)
total = 0
for i in xrange(nelem):
value = int("".join(map(str, individual[i*order:i*order+order])), 2)
total += int(order) * int(value/max_value)
return total,
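# Illustrative values (not part of the original file):
#   royal_road1([1] * 8, 4)          == (8,)  # two complete blocks of four ones
#   royal_road1([1, 1, 1, 0] * 2, 4) == (0,)  # no block reaches its maximum value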
def royal_road2(individual, order):
"""Royal Road Function R2 as presented by Melanie Mitchell in :
"An introduction to Genetic Algorithms".
"""
total = 0
norder = order
while norder < order**2:
        total += royal_road1(individual, norder)[0]
norder *= 2
return total,
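# --- Illustrative usage sketch (not part of the original benchmarks module) ---
# A hedged example of how these helpers are typically combined; the evaluation
# function name `sphere` and the bit strings below are made up for the demo.
if __name__ == "__main__":
    @bin2float(min_=-5.0, max_=5.0, nbits=8)
    def sphere(individual):
        # `individual` here is the list of decoded floats produced by the decorator.
        return sum(x ** 2 for x in individual),
    print(sphere([0, 1] * 8))              # fitness tuple for two 8-bit genes
    print(trap([1, 1, 1, 1]))              # 4: all-ones global optimum
    print(trap([0, 0, 0, 0]))              # 3: deceptive local optimum
    print(royal_road1([1] * 16, order=8))  # (16,): every 8-bit block is complete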
| gpl-3.0 |
redebian/documentation | django/contrib/comments/admin.py | 361 | 3299 | from django.contrib import admin
from django.contrib.comments.models import Comment
from django.utils.translation import ugettext_lazy as _, ungettext
from django.contrib.comments import get_model
from django.contrib.comments.views.moderation import perform_flag, perform_approve, perform_delete
class CommentsAdmin(admin.ModelAdmin):
fieldsets = (
(None,
{'fields': ('content_type', 'object_pk', 'site')}
),
(_('Content'),
{'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment')}
),
(_('Metadata'),
{'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
),
)
list_display = ('name', 'content_type', 'object_pk', 'ip_address', 'submit_date', 'is_public', 'is_removed')
list_filter = ('submit_date', 'site', 'is_public', 'is_removed')
date_hierarchy = 'submit_date'
ordering = ('-submit_date',)
raw_id_fields = ('user',)
search_fields = ('comment', 'user__username', 'user_name', 'user_email', 'user_url', 'ip_address')
actions = ["flag_comments", "approve_comments", "remove_comments"]
def get_actions(self, request):
actions = super(CommentsAdmin, self).get_actions(request)
# Only superusers should be able to delete the comments from the DB.
if not request.user.is_superuser and 'delete_selected' in actions:
actions.pop('delete_selected')
if not request.user.has_perm('comments.can_moderate'):
if 'approve_comments' in actions:
actions.pop('approve_comments')
if 'remove_comments' in actions:
actions.pop('remove_comments')
return actions
def flag_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_flag,
lambda n: ungettext('flagged', 'flagged', n))
flag_comments.short_description = _("Flag selected comments")
def approve_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_approve,
lambda n: ungettext('approved', 'approved', n))
approve_comments.short_description = _("Approve selected comments")
def remove_comments(self, request, queryset):
self._bulk_flag(request, queryset, perform_delete,
lambda n: ungettext('removed', 'removed', n))
remove_comments.short_description = _("Remove selected comments")
def _bulk_flag(self, request, queryset, action, done_message):
"""
Flag, approve, or remove some comments from an admin action. Actually
calls the `action` argument to perform the heavy lifting.
"""
n_comments = 0
for comment in queryset:
action(request, comment)
n_comments += 1
msg = ungettext(u'1 comment was successfully %(action)s.',
u'%(count)s comments were successfully %(action)s.',
n_comments)
self.message_user(request, msg % {'count': n_comments, 'action': done_message(n_comments)})
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app).
if get_model() is Comment:
admin.site.register(Comment, CommentsAdmin)
| bsd-3-clause |
DontBelieveMe/RoboIDE | qdarkstyle/__init__.py | 3 | 3698 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) <2013-2014> <Colin Duquesnoy>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
Initialise the QDarkStyleSheet module when used with python.
This modules provides a function to transparently load the stylesheets
with the correct rc file.
"""
import logging
import platform
__version__ = "2.3.0"
def _logger():
return logging.getLogger('qdarkstyle')
def load_stylesheet(pyside=True):
"""
Loads the stylesheet. Takes care of importing the rc module.
:param pyside: True to load the pyside rc file, False to load the PyQt rc file
:return the stylesheet string
"""
# Smart import of the rc file
if pyside:
import qdarkstyle.pyside_style_rc
else:
import qdarkstyle.pyqt_style_rc
# Load the stylesheet content from resources
if not pyside:
from PyQt4.QtCore import QFile, QTextStream
else:
from PySide.QtCore import QFile, QTextStream
f = QFile(":qdarkstyle/style.qss")
if not f.exists():
_logger().error("Unable to load stylesheet, file not found in "
"resources")
return ""
else:
f.open(QFile.ReadOnly | QFile.Text)
ts = QTextStream(f)
stylesheet = ts.readAll()
if platform.system().lower() == 'darwin': # see issue #12 on github
mac_fix = '''
QDockWidget::title
{
background-color: #31363b;
text-align: center;
height: 12px;
}
'''
stylesheet += mac_fix
return stylesheet
def load_stylesheet_pyqt5():
"""
    Loads the stylesheet for use in a PyQt5 application.
    Takes no arguments; the PyQt5 rc module is imported unconditionally.
    :return the stylesheet string
"""
# Smart import of the rc file
import qdarkstyle.pyqt5_style_rc
# Load the stylesheet content from resources
from PyQt5.QtCore import QFile, QTextStream
f = QFile(":qdarkstyle/style.qss")
if not f.exists():
_logger().error("Unable to load stylesheet, file not found in "
"resources")
return ""
else:
f.open(QFile.ReadOnly | QFile.Text)
ts = QTextStream(f)
stylesheet = ts.readAll()
if platform.system().lower() == 'darwin': # see issue #12 on github
mac_fix = '''
QDockWidget::title
{
background-color: #31363b;
text-align: center;
height: 12px;
}
'''
stylesheet += mac_fix
return stylesheet
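# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of applying the dark theme to a PySide application; for
# PyQt4 pass pyside=False, and for PyQt5 use load_stylesheet_pyqt5() instead.
if __name__ == "__main__":
    import sys
    from PySide.QtGui import QApplication, QLabel
    app = QApplication(sys.argv)
    app.setStyleSheet(load_stylesheet(pyside=True))  # theme every widget in the app
    label = QLabel("QDarkStyleSheet demo")
    label.show()
    sys.exit(app.exec_())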
| mit |
giginet/django-generic-tagging | generic_tagging/templatetags/tagging.py | 1 | 1210 | from django import template
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from ..models import TaggedItem, Tag
register = template.Library()
@register.assignment_tag
def get_tagged_items_for(object):
    '''retrieve tagged items related to the specified object.
:syntax: {% get_tagged_items_for <object> as <variable> %}
'''
return TaggedItem.objects.get_for_object(object)
@register.assignment_tag
def get_tags_for(object):
    '''retrieve tags related to the specified object.
:syntax: {% get_tags_for <object> as <variable> %}
'''
return Tag.objects.get_for_object(object)
@register.assignment_tag
def get_content_type_for(object):
'''retrieve content type object for the specific object.
:syntax: {% get_content_type_for <object> as <variable> %}
'''
return ContentType.objects.get_for_model(object)
@register.simple_tag
def render_generic_tagging_head_tag():
return render_to_string('generic_tagging/head.html')
@register.simple_tag
def render_generic_tagging_component_tag_for(object):
return render_to_string('generic_tagging/component.html', {'object': object})
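# --- Illustrative template usage (not part of the original module) ---
# A hedged sketch of how these tags might be combined in a Django template;
# the surrounding template and the `entry` context variable are assumptions.
#
#   {% load tagging %}
#   {% render_generic_tagging_head_tag %}
#   {% get_tags_for entry as tags %}
#   {% for tag in tags %}{{ tag }} {% endfor %}
#   {% render_generic_tagging_component_tag_for entry %}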
| mit |
raymondxyang/tensorflow | tensorflow/contrib/integrate/python/ops/odes.py | 60 | 24150 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ODE solvers for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
_ButcherTableau = collections.namedtuple('_ButcherTableau',
'alpha beta c_sol c_mid c_error')
# Parameters from Shampine (1986), section 4.
_DORMAND_PRINCE_TABLEAU = _ButcherTableau(
alpha=[1 / 5, 3 / 10, 4 / 5, 8 / 9, 1., 1.],
beta=[
[1 / 5],
[3 / 40, 9 / 40],
[44 / 45, -56 / 15, 32 / 9],
[19372 / 6561, -25360 / 2187, 64448 / 6561, -212 / 729],
[9017 / 3168, -355 / 33, 46732 / 5247, 49 / 176, -5103 / 18656],
[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84],
],
c_sol=[35 / 384, 0, 500 / 1113, 125 / 192, -2187 / 6784, 11 / 84, 0],
c_mid=[
6025192743 / 30085553152 / 2, 0, 51252292925 / 65400821598 / 2,
-2691868925 / 45128329728 / 2, 187940372067 / 1594534317056 / 2,
-1776094331 / 19743644256 / 2, 11237099 / 235043384 / 2
],
c_error=[
1951 / 21600 - 35 / 384,
0,
22642 / 50085 - 500 / 1113,
451 / 720 - 125 / 192,
-12231 / 42400 - -2187 / 6784,
649 / 6300 - 11 / 84,
1 / 60,
],)
def _possibly_nonzero(x):
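  """Return True unless `x` is a plain Python number equal to zero; Tensors are
  conservatively treated as possibly nonzero."""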
return isinstance(x, ops.Tensor) or x != 0
def _scaled_dot_product(scale, xs, ys, name=None):
"""Calculate a scaled, vector inner product between lists of Tensors."""
with ops.name_scope(name, 'scaled_dot_product', [scale, xs, ys]) as scope:
# Some of the parameters in our Butcher tableau include zeros. Using
# _possibly_nonzero lets us avoid wasted computation.
return math_ops.add_n(
[(scale * x) * y for x, y in zip(xs, ys)
if _possibly_nonzero(x) or _possibly_nonzero(y)],
name=scope)
def _dot_product(xs, ys, name=None):
"""Calculate the vector inner product between two lists of Tensors."""
with ops.name_scope(name, 'dot_product', [xs, ys]) as scope:
return math_ops.add_n([x * y for x, y in zip(xs, ys)], name=scope)
def _runge_kutta_step(func,
y0,
f0,
t0,
dt,
tableau=_DORMAND_PRINCE_TABLEAU,
name=None):
"""Take an arbitrary Runge-Kutta step and estimate error.
Args:
func: Function to evaluate like `func(y, t)` to compute the time derivative
of `y`.
y0: Tensor initial value for the state.
f0: Tensor initial value for the derivative, computed from `func(y0, t0)`.
t0: float64 scalar Tensor giving the initial time.
dt: float64 scalar Tensor giving the size of the desired time step.
tableau: optional _ButcherTableau describing how to take the Runge-Kutta
step.
name: optional name for the operation.
Returns:
Tuple `(y1, f1, y1_error, k)` giving the estimated function value after
the Runge-Kutta step at `t1 = t0 + dt`, the derivative of the state at `t1`,
estimated error at `t1`, and a list of Runge-Kutta coefficients `k` used for
calculating these terms.
"""
with ops.name_scope(name, 'runge_kutta_step', [y0, f0, t0, dt]) as scope:
y0 = ops.convert_to_tensor(y0, name='y0')
f0 = ops.convert_to_tensor(f0, name='f0')
t0 = ops.convert_to_tensor(t0, name='t0')
dt = ops.convert_to_tensor(dt, name='dt')
dt_cast = math_ops.cast(dt, y0.dtype)
k = [f0]
for alpha_i, beta_i in zip(tableau.alpha, tableau.beta):
ti = t0 + alpha_i * dt
yi = y0 + _scaled_dot_product(dt_cast, beta_i, k)
k.append(func(yi, ti))
    if not (tableau.c_sol[-1] == 0 and tableau.c_sol[:-1] == tableau.beta[-1]):
# This property (true for Dormand-Prince) lets us save a few FLOPs.
yi = y0 + _scaled_dot_product(dt_cast, tableau.c_sol, k)
y1 = array_ops.identity(yi, name='%s/y1' % scope)
f1 = array_ops.identity(k[-1], name='%s/f1' % scope)
y1_error = _scaled_dot_product(
dt_cast, tableau.c_error, k, name='%s/y1_error' % scope)
return (y1, f1, y1_error, k)
def _interp_fit(y0, y1, y_mid, f0, f1, dt):
"""Fit coefficients for 4th order polynomial interpolation.
Args:
y0: function value at the start of the interval.
y1: function value at the end of the interval.
y_mid: function value at the mid-point of the interval.
f0: derivative value at the start of the interval.
f1: derivative value at the end of the interval.
dt: width of the interval.
Returns:
List of coefficients `[a, b, c, d, e]` for interpolating with the polynomial
`p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e` for values of `x`
between 0 (start of interval) and 1 (end of interval).
"""
# a, b, c, d, e = sympy.symbols('a b c d e')
# x, dt, y0, y1, y_mid, f0, f1 = sympy.symbols('x dt y0 y1 y_mid f0 f1')
# p = a * x ** 4 + b * x ** 3 + c * x ** 2 + d * x + e
# sympy.solve([p.subs(x, 0) - y0,
# p.subs(x, 1 / 2) - y_mid,
# p.subs(x, 1) - y1,
# (p.diff(x) / dt).subs(x, 0) - f0,
# (p.diff(x) / dt).subs(x, 1) - f1],
# [a, b, c, d, e])
# {a: -2.0*dt*f0 + 2.0*dt*f1 - 8.0*y0 - 8.0*y1 + 16.0*y_mid,
# b: 5.0*dt*f0 - 3.0*dt*f1 + 18.0*y0 + 14.0*y1 - 32.0*y_mid,
# c: -4.0*dt*f0 + dt*f1 - 11.0*y0 - 5.0*y1 + 16.0*y_mid,
# d: dt*f0,
# e: y0}
a = _dot_product([-2 * dt, 2 * dt, -8, -8, 16], [f0, f1, y0, y1, y_mid])
b = _dot_product([5 * dt, -3 * dt, 18, 14, -32], [f0, f1, y0, y1, y_mid])
c = _dot_product([-4 * dt, dt, -11, -5, 16], [f0, f1, y0, y1, y_mid])
d = dt * f0
e = y0
return [a, b, c, d, e]
def _interp_fit_rk(y0, y1, k, dt, tableau=_DORMAND_PRINCE_TABLEAU):
"""Fit an interpolating polynomial to the results of a Runge-Kutta step."""
with ops.name_scope('interp_fit_rk'):
dt = math_ops.cast(dt, y0.dtype)
y_mid = y0 + _scaled_dot_product(dt, tableau.c_mid, k)
f0 = k[0]
f1 = k[-1]
return _interp_fit(y0, y1, y_mid, f0, f1, dt)
def _interp_evaluate(coefficients, t0, t1, t):
"""Evaluate polynomial interpolation at the given time point.
Args:
coefficients: list of Tensor coefficients as created by `interp_fit`.
t0: scalar float64 Tensor giving the start of the interval.
t1: scalar float64 Tensor giving the end of the interval.
t: scalar float64 Tensor giving the desired interpolation point.
Returns:
Polynomial interpolation of the coefficients at time `t`.
"""
with ops.name_scope('interp_evaluate'):
t0 = ops.convert_to_tensor(t0)
t1 = ops.convert_to_tensor(t1)
t = ops.convert_to_tensor(t)
dtype = coefficients[0].dtype
assert_op = control_flow_ops.Assert(
(t0 <= t) & (t <= t1),
['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
with ops.control_dependencies([assert_op]):
x = math_ops.cast((t - t0) / (t1 - t0), dtype)
xs = [constant_op.constant(1, dtype), x]
for _ in range(2, len(coefficients)):
xs.append(xs[-1] * x)
return _dot_product(coefficients, reversed(xs))
def _optimal_step_size(last_step,
error_ratio,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
order=5,
name=None):
"""Calculate the optimal size for the next Runge-Kutta step."""
with ops.name_scope(name, 'optimal_step_size', [last_step,
error_ratio]) as scope:
error_ratio = math_ops.cast(error_ratio, last_step.dtype)
exponent = math_ops.cast(1 / order, last_step.dtype)
# this looks more complex than necessary, but importantly it keeps
# error_ratio in the numerator so we can't divide by zero:
factor = math_ops.maximum(1 / ifactor,
math_ops.minimum(error_ratio**exponent / safety,
1 / dfactor))
return math_ops.div(last_step, factor, name=scope)
def _abs_square(x):
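  """Return the elementwise squared magnitude |x|**2, with complex dtypes handled
  via their real and imaginary parts."""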
if x.dtype.is_complex:
return math_ops.square(math_ops.real(x)) + math_ops.square(math_ops.imag(x))
else:
return math_ops.square(x)
def _ta_append(tensor_array, value):
"""Append a value to the end of a tf.TensorArray."""
return tensor_array.write(tensor_array.size(), value)
class _RungeKuttaState(
collections.namedtuple('_RungeKuttaState',
'y1, f1, t0, t1, dt, interp_coeff')):
"""Saved state of the Runge Kutta solver.
Attributes:
y1: Tensor giving the function value at the end of the last time step.
f1: Tensor giving derivative at the end of the last time step.
t0: scalar float64 Tensor giving start of the last time step.
t1: scalar float64 Tensor giving end of the last time step.
dt: scalar float64 Tensor giving the size for the next time step.
interp_coef: list of Tensors giving coefficients for polynomial
interpolation between `t0` and `t1`.
"""
class _History(
collections.namedtuple('_History', 'integrate_points, error_ratio')):
"""Saved integration history for use in `info_dict`.
Attributes:
integrate_points: tf.TensorArray storing integrating time points.
error_ratio: tf.TensorArray storing computed error ratios at each
integration step.
"""
def _assert_increasing(t):
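  """Return a control-dependencies context asserting that `t` is strictly
  increasing."""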
assert_increasing = control_flow_ops.Assert(
math_ops.reduce_all(t[1:] > t[:-1]), ['`t` must be monotonic increasing'])
return ops.control_dependencies([assert_increasing])
def _check_input_types(t, y0):
if not (y0.dtype.is_floating or y0.dtype.is_complex):
raise TypeError('`y0` must have a floating point or complex floating '
'point dtype')
if not t.dtype.is_floating:
raise TypeError('`t` must have a floating point dtype')
def _dopri5(func,
y0,
t,
rtol,
atol,
full_output=False,
first_step=None,
safety=0.9,
ifactor=10.0,
dfactor=0.2,
max_num_steps=1000,
name=None):
"""Solve an ODE for `odeint` using method='dopri5'."""
if first_step is None:
# at some point, we might want to switch to picking the step size
# automatically
first_step = 1.0
with ops.name_scope(name, 'dopri5', [
y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps
]) as scope:
first_step = ops.convert_to_tensor(
first_step, dtype=t.dtype, name='first_step')
safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
max_num_steps = ops.convert_to_tensor(
max_num_steps, dtype=dtypes.int32, name='max_num_steps')
def adaptive_runge_kutta_step(rk_state, history, n_steps):
"""Take an adaptive Runge-Kutta step to integrate the ODE."""
y0, f0, _, t0, dt, interp_coeff = rk_state
with ops.name_scope('assertions'):
check_underflow = control_flow_ops.Assert(t0 + dt > t0,
['underflow in dt', dt])
check_max_num_steps = control_flow_ops.Assert(
n_steps < max_num_steps, ['max_num_steps exceeded'])
check_numerics = control_flow_ops.Assert(
math_ops.reduce_all(math_ops.is_finite(abs(y0))),
['non-finite values in state `y`', y0])
with ops.control_dependencies(
[check_underflow, check_max_num_steps, check_numerics]):
y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)
with ops.name_scope('error_ratio'):
# We use the same approach as the dopri5 fortran code.
error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
# Could also use reduce_maximum here.
error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
accept_step = error_ratio <= 1
with ops.name_scope('update/rk_state'):
# If we don't accept the step, the _RungeKuttaState will be useless
# (covering a time-interval of size 0), but that's OK, because in such
# cases we always immediately take another Runge-Kutta step.
y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
interp_coeff = control_flow_ops.cond(
accept_step, lambda: _interp_fit_rk(y0, y1, k, dt),
lambda: interp_coeff)
dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next,
interp_coeff)
with ops.name_scope('update/history'):
history = _History(
_ta_append(history.integrate_points, t0 + dt),
_ta_append(history.error_ratio, error_ratio))
return rk_state, history, n_steps + 1
def interpolate(solution, history, rk_state, i):
"""Interpolate through the next time point, integrating as necessary."""
with ops.name_scope('interpolate'):
rk_state, history, _ = control_flow_ops.while_loop(
lambda rk_state, *_: t[i] > rk_state.t1,
adaptive_runge_kutta_step, (rk_state, history, 0),
name='integrate_loop')
y = _interp_evaluate(rk_state.interp_coeff, rk_state.t0, rk_state.t1,
t[i])
solution = solution.write(i, y)
return solution, history, rk_state, i + 1
with _assert_increasing(t):
num_times = array_ops.size(t)
solution = tensor_array_ops.TensorArray(
y0.dtype, size=num_times).write(0, y0)
history = _History(
integrate_points=tensor_array_ops.TensorArray(
t.dtype, size=0, dynamic_size=True),
error_ratio=tensor_array_ops.TensorArray(
rtol.dtype, size=0, dynamic_size=True))
rk_state = _RungeKuttaState(
y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)
solution, history, _, _ = control_flow_ops.while_loop(
lambda _, __, ___, i: i < num_times,
interpolate, (solution, history, rk_state, 1),
name='interpolate_loop')
y = solution.stack(name=scope)
y.set_shape(t.get_shape().concatenate(y0.get_shape()))
if not full_output:
return y
else:
integrate_points = history.integrate_points.stack()
info_dict = {
'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
'integrate_points': integrate_points,
'error_ratio': history.error_ratio.stack()
}
return (y, info_dict)
def odeint(func,
y0,
t,
rtol=1e-6,
atol=1e-12,
method=None,
options=None,
full_output=False,
name=None):
"""Integrate a system of ordinary differential equations.
Solves the initial value problem for a non-stiff system of first order ODEs:
```
dy/dt = func(y, t), y(t[0]) = y0
```
where y is a Tensor of any shape.
For example:
```
# solve `dy/dt = -y`, corresponding to exponential decay
tf.contrib.integrate.odeint(lambda y, _: -y, 1.0, [0, 1, 2])
=> [1, exp(-1), exp(-2)]
```
Output dtypes and numerical precision are based on the dtypes of the inputs
`y0` and `t`.
Currently, implements 5th order Runge-Kutta with adaptive step size control
and dense output, using the Dormand-Prince method. Similar to the 'dopri5'
method of `scipy.integrate.ode` and MATLAB's `ode45`.
Based on: Shampine, Lawrence F. (1986), "Some Practical Runge-Kutta Formulas",
Mathematics of Computation, American Mathematical Society, 46 (173): 135-150,
doi:10.2307/2008219
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`. May
have any floating point or complex dtype.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype. If not provided as a Tensor, converted to a Tensor with
float64 dtype.
rtol: optional float64 Tensor specifying an upper bound on relative error,
per element of `y`.
atol: optional float64 Tensor specifying an upper bound on absolute error,
per element of `y`.
method: optional string indicating the integration method to use. Currently,
the only valid option is `'dopri5'`.
options: optional dict of configuring options for the indicated integration
method. Can only be provided if a `method` is explicitly set. For
`'dopri5'`, valid options include:
* first_step: an initial guess for the size of the first integration
(current default: 1.0, but may later be changed to use heuristics based
on the gradient).
* safety: safety factor for adaptive step control, generally a constant
in the range 0.8-1 (default: 0.9).
* ifactor: maximum factor by which the adaptive step may be increased
(default: 10.0).
      * dfactor: maximum factor by which the adaptive step may be decreased
(default: 0.2).
* max_num_steps: integer maximum number of integrate steps between time
points in `t` (default: 1000).
full_output: optional boolean. If True, `odeint` returns a tuple
`(y, info_dict)` describing the integration process.
name: Optional name for this operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
info_dict: only if `full_output == True`. A dict with the following values:
* num_func_evals: integer Tensor counting the number of function
evaluations.
* integrate_points: 1D float64 Tensor with the upper bound of each
integration time step.
* error_ratio: 1D float Tensor with the estimated ratio of the integration
        error to the error tolerance at each integration step. A ratio greater
than 1 corresponds to rejected steps.
Raises:
ValueError: if an invalid `method` is provided.
TypeError: if `options` is supplied without `method`, or if `t` or `y0` has
an invalid dtype.
"""
if method is not None and method != 'dopri5':
raise ValueError('invalid method: %r' % method)
if options is None:
options = {}
elif method is None:
raise ValueError('cannot supply `options` without specifying `method`')
with ops.name_scope(name, 'odeint', [y0, t, rtol, atol]) as scope:
# TODO(shoyer): use nest.flatten (like tf.while_loop) to allow `y0` to be an
# arbitrarily nested tuple. This will help performance and usability by
# avoiding the need to pack/unpack in user functions.
y0 = ops.convert_to_tensor(y0, name='y0')
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
_check_input_types(t, y0)
error_dtype = abs(y0).dtype
rtol = ops.convert_to_tensor(rtol, dtype=error_dtype, name='rtol')
atol = ops.convert_to_tensor(atol, dtype=error_dtype, name='atol')
return _dopri5(
func,
y0,
t,
rtol=rtol,
atol=atol,
full_output=full_output,
name=scope,
**options)
class _FixedGridIntegrator(six.with_metaclass(abc.ABCMeta)):
"""Base class for fixed-grid ODE integrators."""
def integrate(self, evol_func, y0, time_grid):
time_delta_grid = time_grid[1:] - time_grid[:-1]
scan_func = self._make_scan_func(evol_func)
y_grid = functional_ops.scan(scan_func, (time_grid[:-1], time_delta_grid),
y0)
return array_ops.concat([[y0], y_grid], axis=0)
def _make_scan_func(self, evol_func):
def scan_func(y, t_and_dt):
t, dt = t_and_dt
dy = self._step_func(evol_func, t, dt, y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy
return scan_func
@abc.abstractmethod
def _step_func(self, evol_func, t, dt, y):
pass
class _MidpointFixedGridIntegrator(_FixedGridIntegrator):
def _step_func(self, evol_func, t, dt, y):
dt_cast = math_ops.cast(dt, y.dtype)
# yn1 = yn + h * f(tn + h/2, yn + f(tn, yn) * h/2)
return dt_cast * evol_func(y + evol_func(y, t) * dt_cast / 2, t + dt / 2)
class _RK4FixedGridIntegrator(_FixedGridIntegrator):
def _step_func(self, evol_func, t, dt, y):
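    # Classical RK4: dy = (k1 + 2*k2 + 2*k3 + k4) * dt / 6.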
k1 = evol_func(y, t)
half_step = t + dt / 2
dt_cast = math_ops.cast(dt, y.dtype)
k2 = evol_func(y + dt_cast * k1 / 2, half_step)
k3 = evol_func(y + dt_cast * k2 / 2, half_step)
k4 = evol_func(y + dt_cast * k3, t + dt)
return math_ops.add_n([k1, 2 * k2, 2 * k3, k4]) * (dt_cast / 6)
def odeint_fixed(func, y0, t, method='rk4', name=None):
"""ODE integration on a fixed grid (with no step size control).
Useful in certain scenarios to avoid the overhead of adaptive step size
control, e.g. when differentiation of the integration result is desired and/or
the time grid is known a priori to be sufficient.
Args:
func: Function that maps a Tensor holding the state `y` and a scalar Tensor
`t` into a Tensor of state derivatives with respect to time.
y0: N-D Tensor giving starting value of `y` at time point `t[0]`.
t: 1-D Tensor holding a sequence of time points for which to solve for
`y`. The initial time point should be the first element of this sequence,
and each time must be larger than the previous time. May have any floating
point dtype.
method: One of 'midpoint' or 'rk4'.
name: Optional name for the resulting operation.
Returns:
y: (N+1)-D tensor, where the first dimension corresponds to different
time points. Contains the solved value of y for each desired time point in
`t`, with the initial value `y0` being the first element along the first
dimension.
Raises:
ValueError: Upon caller errors.
"""
with ops.name_scope(name, 'odeint_fixed', [y0, t]):
t = ops.convert_to_tensor(t, preferred_dtype=dtypes.float64, name='t')
y0 = ops.convert_to_tensor(y0, name='y0')
_check_input_types(t, y0)
with _assert_increasing(t):
with ops.name_scope(method):
if method == 'midpoint':
return _MidpointFixedGridIntegrator().integrate(func, y0, t)
elif method == 'rk4':
return _RK4FixedGridIntegrator().integrate(func, y0, t)
else:
raise ValueError('method not supported: {!s}'.format(method))
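# --- Illustrative usage sketch (not part of the original module) ---
# A hedged, TF 1.x graph-mode example of integrating dy/dt = -y with both
# solvers; the Session-based driver below is an assumption about the caller.
if __name__ == '__main__':
  from tensorflow.python.client import session as session_lib
  times = constant_op.constant([0., 1., 2.], dtype=dtypes.float64)
  y_adaptive = odeint(lambda y, _: -y, 1.0, times)            # adaptive Dormand-Prince
  y_fixed = odeint_fixed(lambda y, _: -y, 1.0, times, 'rk4')  # fixed-grid classical RK4
  with session_lib.Session() as sess:
    # Both results should be close to exp(-t) at the requested times.
    print(sess.run([y_adaptive, y_fixed]))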
| apache-2.0 |
framon/samba | source3/selftest/tests.py | 1 | 27373 | #!/usr/bin/python
# This script generates a list of testsuites that should be run as part of
# the Samba 3 test suite.
# The output of this script is parsed by selftest.pl, which then decides
# which of the tests to actually run. It will, for example, skip all tests
# listed in selftest/skip or only run a subset during "make quicktest".
# The idea is that this script outputs all of the tests of Samba 3, not
# just those that are known to pass, and list those that should be skipped
# or are known to fail in selftest/skip or selftest/samba3-knownfail. This makes it
# very easy to see what functionality is still missing in Samba 3 and makes
# it possible to run the testsuite against other servers, such as Samba 4 or
# Windows that have a different set of features.
# The syntax for a testsuite is "-- TEST --" on a single line, followed
# by the name of the test, the environment it needs and the command to run, all
# three separated by newlines. All other lines in the output are considered
# comments.
import os, sys
sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), "../../selftest")))
import selftesthelpers
from selftesthelpers import *
smbtorture4_options.extend([
'--option="torture:winbindd_netbios_name=$SERVER"',
'--option="torture:winbindd_netbios_domain=$DOMAIN"',
'--option=torture:sharedelay=100000',
'--option=torture:writetimeupdatedelay=500000',
])
smbtorture4_possible = print_smbtorture4_version()
def plansmbtorture4testsuite(name, env, options, description=''):
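    """Plan a samba3 smbtorture4 testsuite, or record it as skipped when the
    smbtorture4 binary is not available."""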
if description == '':
modname = "samba3.%s" % (name, )
else:
modname = "samba3.%s %s" % (name, description)
if smbtorture4_possible:
selftesthelpers.plansmbtorture4testsuite(
name, env, options, target='samba3', modname=modname)
else:
skiptestsuite(name, "smbtorture4 is not available")
plantestsuite("samba3.blackbox.success", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/test_success.sh")])
plantestsuite("samba3.blackbox.failure", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/test_failure.sh")])
plantestsuite("samba3.local_s3", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/test_local_s3.sh")])
plantestsuite("samba3.blackbox.registry.upgrade", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/test_registry_upgrade.sh"), net, dbwrap_tool])
tests = ["FDPASS", "LOCK1", "LOCK2", "LOCK3", "LOCK4", "LOCK5", "LOCK6", "LOCK7", "LOCK9",
"UNLINK", "BROWSE", "ATTR", "TRANS2", "TORTURE",
"OPLOCK1", "OPLOCK2", "OPLOCK4", "STREAMERROR",
"DIR", "DIR1", "DIR-CREATETIME", "TCON", "TCONDEV", "RW1", "RW2", "RW3", "LARGE_READX", "RW-SIGNING",
"OPEN", "XCOPY", "RENAME", "DELETE", "DELETE-LN", "PROPERTIES", "W2K",
"TCON2", "IOCTL", "CHKPATH", "FDSESS", "CHAIN1", "CHAIN2",
"CHAIN3",
"GETADDRINFO", "UID-REGRESSION-TEST", "SHORTNAME-TEST",
"CASE-INSENSITIVE-CREATE", "SMB2-BASIC", "NTTRANS-FSCTL", "SMB2-NEGPROT",
"SMB2-SESSION-REAUTH", "SMB2-SESSION-RECONNECT",
"CLEANUP1",
"CLEANUP2",
"BAD-NBT-SESSION"]
for t in tests:
plantestsuite("samba3.smbtorture_s3.plain(s3dc).%s" % t, "s3dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
plantestsuite("samba3.smbtorture_s3.crypt_client(s3dc).%s" % t, "s3dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "-e", "-l $LOCAL_PATH"])
if t == "TORTURE":
# this is a negative test to verify that the server rejects
# access without encryption
plantestsuite("samba3.smbtorture_s3.crypt_server(s3dc).%s" % t, "s3dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmpenc', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
plantestsuite("samba3.smbtorture_s3.plain(dc).%s" % t, "dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
tests = ["RW1", "RW2", "RW3"]
for t in tests:
plantestsuite("samba3.smbtorture_s3.vfs_aio_fork(simpleserver).%s" % t, "simpleserver", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/vfs_aio_fork', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
posix_tests = ["POSIX", "POSIX-APPEND"]
for t in posix_tests:
plantestsuite("samba3.smbtorture_s3.plain(s3dc).%s" % t, "s3dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/posix_share', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
plantestsuite("samba3.smbtorture_s3.crypt(s3dc).%s" % t, "s3dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/posix_share', '$USERNAME', '$PASSWORD', smbtorture3, "-e", "-l $LOCAL_PATH"])
plantestsuite("samba3.smbtorture_s3.plain(dc).%s" % t, "dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/posix_share', '$USERNAME', '$PASSWORD', smbtorture3, "", "-l $LOCAL_PATH"])
env = "s3dc:local"
t = "CLEANUP3"
plantestsuite("samba3.smbtorture_s3.plain(%s).%s" % (env, t), env, [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', binpath('smbtorture3'), "", "-l $LOCAL_PATH"])
local_tests = [
"LOCAL-SUBSTITUTE",
"LOCAL-GENCACHE",
"LOCAL-TALLOC-DICT",
"LOCAL-BASE64",
"LOCAL-RBTREE",
"LOCAL-MEMCACHE",
"LOCAL-STREAM-NAME",
"LOCAL-WBCLIENT",
"LOCAL-string_to_sid",
"LOCAL-sid_to_string",
"LOCAL-binary_to_sid",
"LOCAL-DBTRANS",
"LOCAL-TEVENT-SELECT",
"LOCAL-CONVERT-STRING",
"LOCAL-CONV-AUTH-INFO",
"LOCAL-IDMAP-TDB-COMMON",
"LOCAL-hex_encode_buf",
"LOCAL-sprintf_append",
"LOCAL-remove_duplicate_addrs2"]
for t in local_tests:
plantestsuite("samba3.smbtorture_s3.%s" % t, "s3dc", [os.path.join(samba3srcdir, "script/tests/test_smbtorture_s3.sh"), t, '//$SERVER_IP/tmp', '$USERNAME', '$PASSWORD', smbtorture3, "-e"])
tests = ["--ping", "--separator",
"--own-domain",
"--all-domains",
"--trusted-domains",
"--domain-info=BUILTIN",
"--domain-info=$DOMAIN",
"--online-status",
"--online-status --domain=BUILTIN",
"--online-status --domain=$DOMAIN",
"--check-secret --domain=$DOMAIN",
"--change-secret --domain=$DOMAIN",
"--check-secret --domain=$DOMAIN",
"--online-status --domain=$DOMAIN",
#Didn't pass yet# "--domain-users",
"--domain-groups",
"--name-to-sid=$DC_USERNAME",
"--name-to-sid=$DOMAIN\\\\$DC_USERNAME",
#Didn't pass yet# "--user-info=$USERNAME",
"--user-groups=$DOMAIN\\\\$DC_USERNAME",
"--authenticate=$DOMAIN\\\\$DC_USERNAME%$DC_PASSWORD",
"--allocate-uid",
"--allocate-gid"]
plantestsuite("samba.vfstest.stream_depot", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/stream-depot/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
plantestsuite("samba.vfstest.xattr-tdb-1", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/xattr-tdb-1/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
plantestsuite("samba.vfstest.acl", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/vfstest-acl/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
plantestsuite("samba.vfstest.catia", "s3dc:local", [os.path.join(samba3srcdir, "script/tests/vfstest-catia/run.sh"), binpath("vfstest"), "$PREFIX", configuration])
for options in ["--option=clientusespnego=no", " --option=clientntlmv2auth=no --option=clientlanmanauth=yes --max-protocol=LANMAN2", ""]:
env = "s3dc"
plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) %s" % (env, options), env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$DC_USERNAME', '$DC_PASSWORD', smbclient3, configuration, options])
for env in ["s3dc", "member", "s3member", "dc", "s4member"]:
plantestsuite("samba3.blackbox.smbclient_machine_auth.plain (%s:local)" % env, "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_machine_auth.sh"), '$SERVER', smbclient3, configuration])
for env in ["s3dc", "member", "s3member"]:
plantestsuite("samba3.blackbox.smbclient_auth.plain (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$DC_USERNAME', '$DC_PASSWORD', smbclient3, configuration])
plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) member creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$SERVER\\\\$USERNAME', '$PASSWORD', smbclient3, configuration])
for t in tests:
plantestsuite("samba3.wbinfo_s3.(%s:local).%s" % (env, t), "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_wbinfo_s3.sh"), t])
plantestsuite(
"samba3.wbinfo_sids2xids.(%s:local)" % env, "%s:local" % env,
[os.path.join(samba3srcdir, "script/tests/test_wbinfo_sids2xids.sh")])
plantestsuite(
"samba3.ntlm_auth.diagnostics(%s:local)" % env, "%s:local" % env,
[os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_diagnostics.sh"), ntlm_auth3, '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', configuration])
plantestsuite("samba3.ntlm_auth.(%s:local)" % env, "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_s3.sh"), valgrindify(python), samba3srcdir, ntlm_auth3, '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', configuration])
env = "s3member"
t = "--krb5auth=$DOMAIN\\\\$DC_USERNAME%$DC_PASSWORD"
plantestsuite("samba3.wbinfo_s3.(%s:local).%s" % (env, t), "%s:local" % env, [os.path.join(samba3srcdir, "script/tests/test_wbinfo_s3.sh"), t])
plantestsuite("samba3.ntlm_auth.krb5(ktest:local) old ccache", "ktest:local", [os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_krb5.sh"), valgrindify(python), samba3srcdir, ntlm_auth3, '$PREFIX/ktest/krb5_ccache-2', '$SERVER', configuration])
plantestsuite("samba3.ntlm_auth.krb5(ktest:local)", "ktest:local", [os.path.join(samba3srcdir, "script/tests/test_ntlm_auth_krb5.sh"), valgrindify(python), samba3srcdir, ntlm_auth3, '$PREFIX/ktest/krb5_ccache-3', '$SERVER', configuration])
for env in ["maptoguest", "simpleserver"]:
plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) local creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD', smbclient3, configuration + " --option=clientntlmv2auth=no --option=clientlanmanauth=yes"])
env = "maptoguest"
plantestsuite("samba3.blackbox.smbclient_auth.plain (%s) bad username" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_auth.sh"), '$SERVER', '$SERVER_IP', 'notmy$USERNAME', '$PASSWORD', smbclient3, configuration + " --option=clientntlmv2auth=no --option=clientlanmanauth=yes"])
# plain
for env in ["s3dc"]:
plantestsuite("samba3.blackbox.smbclient_s3.plain (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration])
for env in ["member", "s3member"]:
plantestsuite("samba3.blackbox.smbclient_s3.plain (%s) member creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$SERVER', '$SERVER\\\\$USERNAME', '$PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration])
for env in ["s3dc"]:
plantestsuite("samba3.blackbox.smbclient_s3.sign (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$DC_USERNAME', '$DC_PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration, "--signing=required"])
for env in ["member", "s3member"]:
plantestsuite("samba3.blackbox.smbclient_s3.sign (%s) member creds" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$SERVER', '$SERVER\\\\$USERNAME', '$PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration, "--signing=required"])
for env in ["s3dc"]:
# encrypted
plantestsuite("samba3.blackbox.smbclient_s3.crypt (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_s3.sh"), '$SERVER', '$SERVER_IP', '$DOMAIN', '$USERNAME', '$PASSWORD', '$USERID', '$LOCAL_PATH', '$PREFIX', smbclient3, wbinfo, net, configuration, "-e"])
# Test smbclient/tarmode
plantestsuite("samba3.blackbox.smbclient_tarmode (%s)" % env, env, [os.path.join(samba3srcdir, "script/tests/test_smbclient_tarmode.sh"), '$SERVER', '$SERVER_IP', '$USERNAME', '$PASSWORD', '$LOCAL_PATH', '$PREFIX', smbclient3, configuration])
#TODO encrypted against member, with member creds, and with DC creds
plantestsuite("samba3.blackbox.net.misc", "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_net_misc.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.local.registry", "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_net_registry.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.registry.check", "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_net_registry_check.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration, dbwrap_tool])
plantestsuite("samba3.blackbox.net.rpc.registry", "s3dc",
[os.path.join(samba3srcdir, "script/tests/test_net_registry.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration, 'rpc'])
plantestsuite("samba3.blackbox.net.local.registry.roundtrip", "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_net_registry_roundtrip.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.rpc.registry.roundtrip", "s3dc",
[os.path.join(samba3srcdir, "script/tests/test_net_registry_roundtrip.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration, 'rpc'])
plantestsuite("samba3.blackbox.net.local.conf", "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_net_conf.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration])
plantestsuite("samba3.blackbox.net.rpc.conf", "s3dc",
[os.path.join(samba3srcdir, "script/tests/test_net_conf.sh"),
scriptdir, "$SMB_CONF_PATH", net, configuration, 'rpc'])
plantestsuite("samba3.blackbox.testparm", "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_testparm_s3.sh"),
"$LOCAL_PATH"])
plantestsuite(
"samba3.pthreadpool", "s3dc",
[os.path.join(samba3srcdir, "script/tests/test_pthreadpool.sh")])
#smbtorture4 tests
base = ["base.attr", "base.charset", "base.chkpath", "base.defer_open", "base.delaywrite", "base.delete",
"base.deny1", "base.deny2", "base.deny3", "base.denydos", "base.dir1", "base.dir2",
"base.disconnect", "base.fdpass", "base.lock",
"base.mangle", "base.negnowait", "base.ntdeny1",
"base.ntdeny2", "base.open", "base.openattr", "base.properties", "base.rename", "base.rw1",
"base.secleak", "base.tcon", "base.tcondev", "base.trans2", "base.unlink", "base.vuid",
"base.xcopy", "base.samba3error"]
raw = ["raw.acls", "raw.chkpath", "raw.close", "raw.composite", "raw.context", "raw.eas",
"raw.ioctl", "raw.lock", "raw.mkdir", "raw.mux", "raw.notify", "raw.open", "raw.oplock",
"raw.qfileinfo", "raw.qfsinfo", "raw.read", "raw.rename", "raw.search", "raw.seek",
"raw.sfileinfo.base", "raw.sfileinfo.bug", "raw.streams", "raw.unlink", "raw.write",
"raw.samba3hide", "raw.samba3badpath", "raw.sfileinfo.rename", "raw.session",
"raw.samba3caseinsensitive", "raw.samba3posixtimedlock",
"raw.samba3rootdirfid", "raw.sfileinfo.end-of-file",
"raw.bench-oplock", "raw.bench-lock", "raw.bench-open", "raw.bench-tcon",
"raw.samba3checkfsp", "raw.samba3closeerr", "raw.samba3oplocklogoff", "raw.samba3badnameblob"]
smb2 = smbtorture4_testsuites("smb2.")
rpc = ["rpc.authcontext", "rpc.samba3.bind", "rpc.samba3.srvsvc", "rpc.samba3.sharesec",
"rpc.samba3.spoolss", "rpc.samba3.wkssvc", "rpc.samba3.winreg",
"rpc.samba3.getaliasmembership-0",
"rpc.samba3.netlogon", "rpc.samba3.sessionkey", "rpc.samba3.getusername",
"rpc.samba3.smb1-pipe-name", "rpc.samba3.smb2-pipe-name",
"rpc.samba3.smb-reauth1", "rpc.samba3.smb-reauth2",
"rpc.svcctl", "rpc.ntsvcs", "rpc.winreg", "rpc.eventlog",
"rpc.spoolss.printserver", "rpc.spoolss.win", "rpc.spoolss.notify", "rpc.spoolss.printer",
"rpc.spoolss.driver",
"rpc.lsa", "rpc.lsa-getuser", "rpc.lsa.lookupsids", "rpc.lsa.lookupnames",
"rpc.lsa.privileges", "rpc.lsa.secrets",
"rpc.samr", "rpc.samr.users", "rpc.samr.users.privileges", "rpc.samr.passwords",
"rpc.samr.passwords.pwdlastset", "rpc.samr.large-dc", "rpc.samr.machine.auth",
"rpc.samr.priv", "rpc.samr.passwords.validate",
"rpc.netlogon.admin",
"rpc.schannel", "rpc.schannel2", "rpc.bench-schannel1", "rpc.join", "rpc.bind"]
local = ["local.nss-wrapper", "local.ndr"]
winbind = ["winbind.struct", "winbind.wbclient", "winbind.pac"]
idmap = [ "idmap.rfc2307" ]
rap = ["rap.basic", "rap.rpc", "rap.printing", "rap.sam"]
unix = ["unix.info2", "unix.whoami"]
nbt = ["nbt.dgram" ]
libsmbclient = ["libsmbclient"]
tests= base + raw + smb2 + rpc + unix + local + winbind + rap + nbt + libsmbclient + idmap
for t in tests:
if t == "base.delaywrite":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --maximum-runtime=900')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --maximum-runtime=900')
elif t == "rap.sam":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --option=doscharset=ISO-8859-1')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD --option=doscharset=ISO-8859-1')
elif t == "winbind.pac":
plansmbtorture4testsuite(t, "s3member:local", '//$SERVER/tmp --realm=$REALM --machine-pass --option=torture:addc=$DC_SERVER', description="machine account")
elif t == "unix.whoami":
plansmbtorture4testsuite(t, "member:local", '//$SERVER/tmp --machine-pass', description="machine account")
plansmbtorture4testsuite(t, "s3member:local", '//$SERVER/tmp --machine-pass --option=torture:addc=$DC_SERVER', description="machine account")
for env in ["s3dc", "member"]:
plansmbtorture4testsuite(t, env, '//$SERVER/tmp -U$DC_USERNAME%$DC_PASSWORD')
plansmbtorture4testsuite(t, env, '//$SERVER/tmpguest -U%', description='anonymous connection')
for env in ["plugin_s4_dc", "s3member"]:
plansmbtorture4testsuite(t, env, '//$SERVER/tmp -U$DC_USERNAME@$REALM%$DC_PASSWORD --option=torture:addc=$DC_SERVER')
plansmbtorture4testsuite(t, env, '//$SERVER/tmp -k yes -U$DC_USERNAME@$REALM%$DC_PASSWORD --option=torture:addc=$DC_SERVER', description='kerberos connection')
plansmbtorture4testsuite(t, env, '//$SERVER/tmpguest -U% --option=torture:addc=$DC_SERVER', description='anonymous connection')
elif t == "raw.samba3posixtimedlock":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmpguest -U$USERNAME%$PASSWORD --option=torture:localdir=$SELFTEST_PREFIX/s3dc/share')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/tmpguest -U$USERNAME%$PASSWORD --option=torture:localdir=$SELFTEST_PREFIX/plugin_s4_dc/share')
elif t == "raw.chkpath":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmpcase -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/tmpcase -U$USERNAME%$PASSWORD')
elif t == "raw.samba3hide" or t == "raw.samba3checkfsp" or t == "raw.samba3closeerr":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "simpleserver", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
elif t == "raw.session" or t == "smb2.session":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD', 'plain')
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmpenc -U$USERNAME%$PASSWORD', 'enc')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER/tmp -k no -U$USERNAME%$PASSWORD', 'ntlm')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER/tmp -k yes -U$USERNAME%$PASSWORD', 'krb5')
elif t == "rpc.lsa":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD', 'over ncacn_np ')
plansmbtorture4testsuite(t, "s3dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD', 'over ncacn_np ')
plansmbtorture4testsuite(t, "plugin_s4_dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
elif t == "rpc.samr.passwords.validate":
plansmbtorture4testsuite(t, "s3dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
plansmbtorture4testsuite(t, "plugin_s4_dc", 'ncacn_ip_tcp:$SERVER_IP -U$USERNAME%$PASSWORD', 'over ncacn_ip_tcp ')
elif t == "smb2.durable-open" or t == "smb2.durable-v2-open":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/durable -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/durable -U$USERNAME%$PASSWORD')
elif t == "base.rw1":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/valid-users-tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/write-list-tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
elif t == "idmap.rfc2307":
plantestsuite(t, "s3member_rfc2307", [os.path.join(samba3srcdir, "../nsswitch/tests/test_idmap_rfc2307.sh"), '$DOMAIN', 'Administrator', '2000000', '"Domain Users"', '2000001', 'ou=idmap,dc=samba,dc=example,dc=com', '$DC_SERVER', '$DC_USERNAME', '$DC_PASSWORD'])
elif t == "raw.acls":
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/nfs4acl_simple -U$USERNAME%$PASSWORD', description='nfs4acl_xattr-simple')
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/nfs4acl_special -U$USERNAME%$PASSWORD', description='nfs4acl_xattr-special')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER_IP/tmpcase -U$USERNAME%$PASSWORD')
else:
plansmbtorture4testsuite(t, "s3dc", '//$SERVER_IP/tmp -U$USERNAME%$PASSWORD')
plansmbtorture4testsuite(t, "plugin_s4_dc", '//$SERVER/tmp -U$USERNAME%$PASSWORD')
test = 'rpc.lsa.lookupsids'
auth_options = ["", "ntlm", "spnego", "spnego,ntlm" ]
signseal_options = ["", ",connect", ",sign", ",seal"]
endianness_options = ["", ",bigendian"]
for s in signseal_options:
for e in endianness_options:
for a in auth_options:
binding_string = "ncacn_np:$SERVER[%s%s%s]" % (a, s, e)
options = binding_string + " -U$USERNAME%$PASSWORD"
plansmbtorture4testsuite(test, "s3dc", options, 'over ncacn_np with [%s%s%s] ' % (a, s, e))
plantestsuite("samba3.blackbox.rpcclient over ncacn_np with [%s%s%s] " % (a, s, e), "s3dc:local", [os.path.join(samba3srcdir, "script/tests/test_rpcclient.sh"),
"none", options, configuration])
# We should try more combinations in future, but this is all
# the pre-calculated credentials cache supports at the moment
e = ""
a = ""
binding_string = "ncacn_np:$SERVER[%s%s%s]" % (a, s, e)
options = binding_string + " -k yes --krb5-ccache=$PREFIX/ktest/krb5_ccache-2"
plansmbtorture4testsuite(test, "ktest", options, 'krb5 with old ccache ncacn_np with [%s%s%s] ' % (a, s, e))
options = binding_string + " -k yes --krb5-ccache=$PREFIX/ktest/krb5_ccache-3"
plansmbtorture4testsuite(test, "ktest", options, 'krb5 ncacn_np with [%s%s%s] ' % (a, s, e))
auth_options2 = ["krb5", "spnego,krb5"]
for a in auth_options2:
binding_string = "ncacn_np:$SERVER[%s%s%s]" % (a, s, e)
plantestsuite("samba3.blackbox.rpcclient krb5 ncacn_np with [%s%s%s] " % (a, s, e), "ktest:local", [os.path.join(samba3srcdir, "script/tests/test_rpcclient.sh"),
"$PREFIX/ktest/krb5_ccache-3", binding_string, "-k", configuration])
options_list = ["", "-e"]
for options in options_list:
plantestsuite("samba3.blackbox.smbclient_krb5 old ccache %s" % options, "ktest:local",
[os.path.join(samba3srcdir, "script/tests/test_smbclient_krb5.sh"),
"$PREFIX/ktest/krb5_ccache-2",
smbclient3, "$SERVER", options, configuration])
plantestsuite("samba3.blackbox.smbclient_krb5 old ccache %s" % options, "ktest:local",
[os.path.join(samba3srcdir, "script/tests/test_smbclient_krb5.sh"),
"$PREFIX/ktest/krb5_ccache-2",
smbclient3, "$SERVER", options, configuration])
plantestsuite("samba3.blackbox.smbclient_large_file %s" % options, "ktest:local",
[os.path.join(samba3srcdir, "script/tests/test_smbclient_posix_large.sh"),
"$PREFIX/ktest/krb5_ccache-3",
smbclient3, "$SERVER", "$PREFIX", options, "-k " + configuration])
plantestsuite("samba3.blackbox.smbclient_posix_large %s krb5" % options, "ktest:local",
[os.path.join(samba3srcdir, "script/tests/test_smbclient_posix_large.sh"),
"$PREFIX/ktest/krb5_ccache-3",
smbclient3, "$SERVER", "$PREFIX", options, "-k " + configuration])
plantestsuite("samba3.blackbox.smbclient_posix_large %s NTLM" % options, "s3dc:local",
[os.path.join(samba3srcdir, "script/tests/test_smbclient_posix_large.sh"),
"none",
smbclient3, "$SERVER", "$PREFIX", options, "-U$USERNAME%$PASSWORD " + configuration])
for e in endianness_options:
for a in auth_options:
for s in signseal_options:
binding_string = "ncacn_ip_tcp:$SERVER_IP[%s%s%s]" % (a, s, e)
options = binding_string + " -U$USERNAME%$PASSWORD"
plansmbtorture4testsuite(test, "s3dc", options, 'over ncacn_ip_tcp with [%s%s%s] ' % (a, s, e))
plansmbtorture4testsuite('rpc.epmapper', 's3dc:local', 'ncalrpc: -U$USERNAME%$PASSWORD', 'over ncalrpc')
| gpl-3.0 |
pplatek/odoo | addons/hr_recruitment/__openerp__.py | 260 | 2780 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Recruitment Process',
'version': '1.0',
'category': 'Human Resources',
'sequence': 25,
'summary': 'Jobs, Recruitment, Applications, Job Interviews, Surveys',
'description': """
Manage job positions and the recruitment process
================================================
This application allows you to easily keep track of jobs, vacancies, applications, interviews...
It is integrated with the mail gateway to automatically fetch email sent to <[email protected]> in the list of applications. It's also integrated with the document management system to store and search in the CV base and find the candidate that you are looking for. Similarly, it is integrated with the survey module to allow you to define interviews for different jobs.
You can define the different phases of interviews and easily rate the applicant from the kanban view.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/recruitment',
'depends': [
'decimal_precision',
'hr',
'survey',
'calendar',
'fetchmail',
'web_kanban_gauge',
],
'data': [
'wizard/hr_recruitment_create_partner_job_view.xml',
'hr_recruitment_view.xml',
'hr_recruitment_menu.xml',
'security/hr_recruitment_security.xml',
'security/ir.model.access.csv',
'report/hr_recruitment_report_view.xml',
'hr_recruitment_installer_view.xml',
'res_config_view.xml',
'survey_data_recruitment.xml',
'hr_recruitment_data.xml',
'views/hr_recruitment.xml',
],
'demo': ['hr_recruitment_demo.xml'],
'test': ['test/recruitment_process.yml'],
'installable': True,
'auto_install': False,
'application': True,
}
| agpl-3.0 |
watsonpy/watson-filters | tests/watson/filters/test_string.py | 1 | 1528 | # -*- coding: utf-8 -*-
from datetime import datetime
from watson.filters.string import Trim, RegEx, Numbers, Upper, Lower, StripTags, HtmlEntities, Date
class TestTrim(object):
def test_trim_string(self):
filter = Trim()
assert filter(' Test') == 'Test'
assert filter('Test') == 'Test'
def test_trim_none(self):
filter = Trim()
assert not filter(None)
class TestUpper(object):
def test_to_upper(self):
filter = Upper()
assert filter('test') == 'TEST'
class TestLower(object):
def test_to_upper(self):
filter = Lower()
assert filter('TEST') == 'test'
class TestRegEx(object):
def test_replace_string(self):
filter = RegEx('ing', replacement='ed')
assert filter('testing') == 'tested'
class TestNumbers(object):
def test_remove_numbers(self):
filter = Numbers()
assert filter('ab1234') == '1234'
class TestStripTags(object):
def test_strip_tags(self):
filter = StripTags()
assert filter('test<div>blah</div>') == 'testblah'
class TestHtmlEntities(object):
def test_encode(self):
filter = HtmlEntities()
assert filter('<div>test</div>') == '<div>test</div>'
class TestDate(object):
def test_date(self):
filter = Date()
date = filter('2013-09-12')
assert datetime(2013, 9, 12, 0, 0) == date
def test_none_date(self):
filter = Date()
date = filter(None)
assert not date
| bsd-3-clause |
happyleavesaoc/home-assistant | homeassistant/components/cover/rpi_gpio.py | 7 | 3381 | """
Support for controlling a Raspberry Pi cover.
Instructions for building the controller can be found here
https://github.com/andrewshilliday/garage-door-controller
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.rpi_gpio/
"""
import logging
from time import sleep
import voluptuous as vol
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.components.rpi_gpio as rpi_gpio
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_COVERS = 'covers'
CONF_RELAY_PIN = 'relay_pin'
CONF_RELAY_TIME = 'relay_time'
CONF_STATE_PIN = 'state_pin'
CONF_STATE_PULL_MODE = 'state_pull_mode'
DEFAULT_RELAY_TIME = .2
DEFAULT_STATE_PULL_MODE = 'UP'
DEPENDENCIES = ['rpi_gpio']
_COVERS_SCHEMA = vol.All(
cv.ensure_list,
[
vol.Schema({
CONF_NAME: cv.string,
CONF_RELAY_PIN: cv.positive_int,
CONF_STATE_PIN: cv.positive_int,
})
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): _COVERS_SCHEMA,
vol.Optional(CONF_STATE_PULL_MODE, default=DEFAULT_STATE_PULL_MODE):
cv.string,
vol.Optional(CONF_RELAY_TIME, default=DEFAULT_RELAY_TIME): cv.positive_int,
})
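# Illustrative configuration.yaml snippet inferred from the schema above; the pin
# numbers and the cover name are assumptions, not values taken from this file:
#   cover:
#     - platform: rpi_gpio
#       relay_time: 0.2
#       state_pull_mode: "UP"
#       covers:
#         - relay_pin: 10
#           state_pin: 11
#           name: "Garage Door"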
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the RPi cover platform."""
relay_time = config.get(CONF_RELAY_TIME)
state_pull_mode = config.get(CONF_STATE_PULL_MODE)
covers = []
covers_conf = config.get(CONF_COVERS)
for cover in covers_conf:
covers.append(RPiGPIOCover(
cover[CONF_NAME], cover[CONF_RELAY_PIN], cover[CONF_STATE_PIN],
state_pull_mode, relay_time))
add_devices(covers)
class RPiGPIOCover(CoverDevice):
"""Representation of a Raspberry GPIO cover."""
def __init__(self, name, relay_pin, state_pin, state_pull_mode,
relay_time):
"""Initialize the cover."""
self._name = name
self._state = False
self._relay_pin = relay_pin
self._state_pin = state_pin
self._state_pull_mode = state_pull_mode
self._relay_time = relay_time
rpi_gpio.setup_output(self._relay_pin)
rpi_gpio.setup_input(self._state_pin, self._state_pull_mode)
rpi_gpio.write_output(self._relay_pin, True)
@property
def unique_id(self):
"""Return the ID of this cover."""
return '{}.{}'.format(self.__class__, self._name)
@property
def name(self):
"""Return the name of the cover if any."""
return self._name
def update(self):
"""Update the state of the cover."""
self._state = rpi_gpio.read_input(self._state_pin)
@property
def is_closed(self):
"""Return true if cover is closed."""
return self._state
def _trigger(self):
"""Trigger the cover."""
rpi_gpio.write_output(self._relay_pin, False)
sleep(self._relay_time)
rpi_gpio.write_output(self._relay_pin, True)
def close_cover(self):
"""Close the cover."""
if not self.is_closed:
self._trigger()
def open_cover(self):
"""Open the cover."""
if self.is_closed:
self._trigger()
| apache-2.0 |
python-security/pyt | pyt/core/module_definitions.py | 1 | 4715 | """This module handles module definitions
which are basically lists of module definition objects."""
import ast
# Contains all project definitions for a program run
# Only used in framework_adaptor.py, but modified here
project_definitions = dict()
class ModuleDefinition():
"""Handling of a definition."""
module_definitions = None
name = None
node = None
path = None
def __init__(
self,
local_module_definitions,
name,
parent_module_name,
path
):
self.module_definitions = local_module_definitions
self.parent_module_name = parent_module_name
self.path = path
if parent_module_name:
if isinstance(parent_module_name, ast.alias):
self.name = parent_module_name.name + '.' + name
else:
self.name = parent_module_name + '.' + name
else:
self.name = name
def __str__(self):
name = 'NoName'
node = 'NoNode'
if self.name:
name = self.name
if self.node:
node = str(self.node)
return "Path:" + self.path + " " + self.__class__.__name__ + ': ' + ';'.join((name, node))
class LocalModuleDefinition(ModuleDefinition):
"""A local definition."""
pass
class ModuleDefinitions():
"""A collection of module definition.
Adds to the project definitions list.
"""
def __init__(
self,
import_names=None,
module_name=None,
is_init=False,
filename=None
):
"""Optionally set import names and module name.
Module name should only be set when it is a normal import statement.
"""
self.import_names = import_names
# module_name is sometimes ast.alias or a string
self.module_name = module_name
self.is_init = is_init
self.filename = filename
self.definitions = list()
self.classes = list()
self.import_alias_mapping = dict()
def append_if_local_or_in_imports(self, definition):
"""Add definition to list.
Handles local definitions and adds to project_definitions.
"""
if isinstance(definition, LocalModuleDefinition):
self.definitions.append(definition)
elif self.import_names == ["*"]:
self.definitions.append(definition)
elif self.import_names and definition.name in self.import_names:
self.definitions.append(definition)
elif (self.import_alias_mapping and definition.name in
self.import_alias_mapping.values()):
self.definitions.append(definition)
if definition.parent_module_name:
self.definitions.append(definition)
if definition.node not in project_definitions:
project_definitions[definition.node] = definition
def get_definition(self, name):
"""Get definitions by name."""
for definition in self.definitions:
if definition.name == name:
return definition
def set_definition_node(self, node, name):
"""Set definition by name."""
definition = self.get_definition(name)
if definition:
definition.node = node
def __str__(self):
module = 'NoModuleName'
if self.module_name:
module = self.module_name
if self.definitions:
if isinstance(module, ast.alias):
return (
'Definitions: "' + '", "'
.join([str(definition) for definition in self.definitions]) +
'" and module_name: ' + module.name +
' and filename: ' + str(self.filename) +
' and is_init: ' + str(self.is_init) + '\n')
return (
'Definitions: "' + '", "'
.join([str(definition) for definition in self.definitions]) +
'" and module_name: ' + module +
' and filename: ' + str(self.filename) +
' and is_init: ' + str(self.is_init) + '\n')
else:
if isinstance(module, ast.alias):
return (
'import_names is ' + str(self.import_names) +
' No Definitions, module_name: ' + str(module.name) +
' and filename: ' + str(self.filename) +
' and is_init: ' + str(self.is_init) + '\n')
return (
'import_names is ' + str(self.import_names) +
' No Definitions, module_name: ' + str(module) +
' and filename: ' + str(self.filename) +
' and is_init: ' + str(self.is_init) + '\n')
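# Minimal usage sketch (illustrative only; the import name, definition name and
# path below are assumptions, not taken from the original project):
#   module_defs = ModuleDefinitions(import_names=['foo'], module_name='pkg')
#   definition = LocalModuleDefinition(module_defs, 'foo', None, 'pkg/foo.py')
#   module_defs.append_if_local_or_in_imports(definition)
#   assert module_defs.get_definition('foo') is definition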
| gpl-2.0 |
mrquim/mrquimrepo | repo/script.module.exodus/lib/resources/lib/modules/tvmaze.py | 5 | 4098 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,json
from resources.lib.modules import cache
from resources.lib.modules import client
class tvMaze:
def __init__(self, show_id = None):
self.api_url = 'http://api.tvmaze.com/%s%s'
self.show_id = show_id
def showID(self, show_id = None):
if (show_id != None):
self.show_id = show_id
return show_id
return self.show_id
def request(self, endpoint, query = None):
try:
            # Encode the query parameters, if there are any...
if (query != None):
query = '?' + urllib.urlencode(query)
else:
query = ''
# Make the request
request = self.api_url % (endpoint, query)
# Send the request and get the response
# Get the results from cache if available
response = cache.get(client.request, 24, request)
            # Return the result as a dictionary
return json.loads(response)
except:
pass
return {}
def showLookup(self, type, id):
try:
result = self.request('lookup/shows', {type: id})
# Storing the show id locally
if ('id' in result):
self.show_id = result['id']
return result
except:
pass
return {}
def shows(self, show_id = None, embed = None):
try:
if (not self.showID(show_id)):
raise Exception()
result = self.request('shows/%d' % self.show_id)
# Storing the show id locally
if ('id' in result):
self.show_id = result['id']
return result
except:
pass
return {}
def showSeasons(self, show_id = None):
try:
if (not self.showID(show_id)):
raise Exception()
result = self.request('shows/%d/seasons' % int( self.show_id ))
if (len(result) > 0 and 'id' in result[0]):
return result
except:
pass
return []
def showSeasonList(self, show_id):
return {}
def showEpisodeList(self, show_id = None, specials = False):
try:
if (not self.showID(show_id)):
raise Exception()
result = self.request('shows/%d/episodes' % int( self.show_id ), 'specials=1' if specials else '')
if (len(result) > 0 and 'id' in result[0]):
return result
except:
pass
return []
def episodeAbsoluteNumber(self, thetvdb, season, episode):
try:
url = 'http://thetvdb.com/api/%s/series/%s/default/%01d/%01d' % ('MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, int(season), int(episode))
return int(client.parseDOM(client.request(url), 'absolute_number')[0])
except:
pass
return episode
def getTVShowTranslation(self, thetvdb, lang):
try:
url = 'http://thetvdb.com/api/%s/series/%s/%s.xml' % ('MUQ2MkYyRjkwMDMwQzQ0NA=='.decode('base64'), thetvdb, lang)
r = client.request(url)
title = client.parseDOM(r, 'SeriesName')[0]
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
return title
except:
pass
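# Illustrative usage sketch (the lookup id below is a placeholder, not real data):
#   maze = tvMaze()
#   show = maze.showLookup('thetvdb', 12345)       # also stores the show id on the instance
#   seasons = maze.showSeasons()
#   episodes = maze.showEpisodeList(specials=True)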
| gpl-2.0 |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/registry/xmlrpc/softwarecenteragent.py | 1 | 1672 | # Copyright 2010-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""XMLRPC APIs for person set."""
__metaclass__ = type
__all__ = [
'SoftwareCenterAgentAPI',
]
from zope.component import getUtility
from zope.interface import implements
from lp.registry.interfaces.person import (
IPersonSet,
ISoftwareCenterAgentAPI,
ISoftwareCenterAgentApplication,
PersonCreationRationale,
TeamEmailAddressError,
)
from lp.services.identity.interfaces.account import AccountSuspendedError
from lp.services.webapp import LaunchpadXMLRPCView
from lp.xmlrpc import faults
class SoftwareCenterAgentAPI(LaunchpadXMLRPCView):
"""See `ISoftwareCenterAgentAPI`."""
implements(ISoftwareCenterAgentAPI)
def getOrCreateSoftwareCenterCustomer(self, openid_identifier, email,
full_name):
try:
person, db_updated = getUtility(
IPersonSet).getOrCreateByOpenIDIdentifier(
openid_identifier.decode('ASCII'), email, full_name,
PersonCreationRationale.SOFTWARE_CENTER_PURCHASE,
"when purchasing an application via Software Center.")
except AccountSuspendedError:
return faults.AccountSuspended(openid_identifier)
except TeamEmailAddressError:
return faults.TeamEmailAddress(email, openid_identifier)
return person.name
class SoftwareCenterAgentApplication:
"""Software center agent end-point."""
implements(ISoftwareCenterAgentApplication)
title = "Software Center Agent API"
| agpl-3.0 |
harigowtham/sos | sos/plugins/kdump.py | 12 | 1514 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class KDump(Plugin):
"""Kdump crash dumps
"""
plugin_name = "kdump"
profiles = ('system', 'debug')
def setup(self):
self.add_copy_spec([
"/proc/cmdline"
])
class RedHatKDump(KDump, RedHatPlugin):
files = ('/etc/kdump.conf',)
packages = ('kexec-tools',)
def setup(self):
self.add_copy_spec([
"/etc/kdump.conf",
"/etc/udev/rules.d/*kexec.rules",
"/var/crash/*/vmcore-dmesg.txt"
])
class DebianKDump(KDump, DebianPlugin, UbuntuPlugin):
files = ('/etc/default/kdump-tools',)
packages = ('kdump-tools',)
def setup(self):
self.add_copy_spec([
"/etc/default/kdump-tools"
])
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
BrainTech/openbci | obci/analysis/csp/MLogit.py | 1 | 11792 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""This is a class for Multinomial Logit Regression
The class uses the scipy.optimize package for minimization of a cost function.
The gradient of the cost function is passed to the minimizer.
Piotr Milanowski, November 2011, Warsaw
"""
from scipy.optimize import fmin_ncg, fmin_bfgs, fmin
import numpy as np
import matplotlib.pyplot as plt
def mix(x1, x2, deg=6):
out = np.zeros([len(x1), sum(range(deg+2))])
k = 0
for i in xrange(deg+1):
for j in range(i+1):
out[:,k] = x1**(i-j)*x2**(j)
k += 1
return out
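# Illustrative note (not part of the original module): with deg=2 the columns of
# mix(x1, x2) are the polynomial terms
#   [1, x1, x2, x1**2, x1*x2, x2**2]
# i.e. all monomials x1**(i-j) * x2**j for i = 0..deg and j = 0..i.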
class logit(object):
"""This is a class for a normal two-class logistic regression
The hypothesis of this regression is a sigmoid (logistic, logit) function.
It returns the probability of the data belonging to the first class.
    The minimization of the cost function uses the BFGS routine (fmin_bfgs) from the scipy.optimize package.
    The regression can account for regularization factors.
"""
def __init__(self, data, classes, labels=None):
"""Initialization of data
A column of ones is added to the data array.
Parameters:
===========
data : 2darray
NxM array. Rows of this array represent data points, columns represent features.
classes : 1darray
            an N dimensional vector of classes. Each class is represented by either 0 or 1.
        labels [= None] : list
            a 2 element list that maps classes to their names.
Example:
=========
>>>X = np.random.rand(20, 4) #data
>>>Y = np.random.randint(0,2,20) #classes
>>>labels = ['class 1','class 2']
>>>MLogit.logit(X, Y, labels)
"""
self.dataNo, self.featureNo = data.shape
if len(classes) != self.dataNo:
            raise ValueError, 'Not every data point has its target label!'
        #Adding a column of 1s and normalizing data - NO NORMALIZATION NEEDED
self.X = np.concatenate((np.ones([self.dataNo, 1]), data), axis = 1)
self.Y = classes
def _sigmoid(self, z):
"""This returns the value of a sigmoid function.
        The sigmoid/logistic/logit function looks like this:
        f(z) = 1 / (1 + exp(-z))
Parameters:
===========
z : ndarray
the parameter of the function
Returns:
sig : ndarray
values of sigmoid function at z
"""
return 1/(1 + np.exp(-z))
def cost_function(self, theta, reg = 0):
"""The cost function of logit regression model
It looks like this:
J(theta) = -((1/M)*sum_{i=1}^{M}(y_i*log(h(theta;x_i))+(1-y_i)*log(1-h(theta;x_i)))) +
        + (reg/(2*M))*sum_{j=1}^{N}(theta_j)^2
Parameters:
===========
theta : 1darray
the array of parameters. It's a (N+1) dimensional vector
reg [= 0] : float
the regularization parameter. This parameter penalizes theta being too big (overfitting)
Returns:
========
J : float
the value of cost function for given theta
"""
z = self._sigmoid(np.dot(self.X, theta))
regular = (reg/(2.0*self.dataNo))*sum(theta[1:]*theta[1:])
J = self.Y * np.log(z) + (1 - self.Y)*np.log(1 - z)
J = -(1.0 / self.dataNo) * sum(J)
return regular + J
def gradient_function(self, theta, reg = 0):
"""The gradient of cost function
The gradient looks like this:
g[0] = 1/N * sum_{i=1}^{N}(h(theta;x_i) - y_i)*x_i^0
        g[j] = 1/N * sum_{i=1}^{N}(h(theta;x_i) - y_i)*x_i^j + theta[j]*reg/N, for j >= 1
Parameters:
===========
theta : 1darray
the vector of parameters
reg : float
the regularization parameter
Returns:
========
fprime : 1darray
the gradient of cost function.
"""
gradient = np.zeros(self.featureNo + 1)
N = 1.0 / self.dataNo
z = np.dot(self.X, theta)
cost = self._sigmoid(z) - self.Y
# gradient[0] = N * sum(cost * self.X[:, 0])
# for j in xrange(self.featureNo):
# gradient[j] = N * sum(cost * self.X[:, j]) - reg * N * theta[j]
gradient = N * np.dot(cost, self.X)
gradient[1:] += reg * N * theta[1:]
return gradient
def fit(self, maxiter, reg = 0, initial_gues = None):
"""Minimizing function
        Based on the BFGS routine (fmin_bfgs) from the scipy.optimize package.
Parameters:
===========
maxiter : int
maximal number of iterations
reg [= 0] : float
regularization parameter
        initial_gues [= None] : 1darray
            a vector of size #features + 1. If None, zeros will be assumed.
Returns:
========
theta : 1darray
optimal model parameters
"""
if initial_gues is None:
initial_gues = np.zeros(self.featureNo + 1)
out = fmin_bfgs(self.cost_function, initial_gues, \
self.gradient_function, args = ([reg]))
self.theta = out
return out
def predict(self, x, val=0.9):
"""For prediction of x
Returns predicted probability of x being in class 1
"""
x = np.insert(x, 0, 1) #inserting one at the beginning
z = np.dot(x, self.theta)
#if self._sigmoid(z) >=val:
#return 1
#else:
#return 0
return self._sigmoid(z)
def plot_features(self, show=True):
y = self.Y
idx = np.argsort(y)
x = self.X[idx, :]
y = y[idx]
N, feats = x.shape
if feats == 3:
idx1 = np.where(y==1)[0][0]
x1 = x[:idx1, :]
x2 = x[idx1:, :]
plt.plot(x1[:,1],x1[:,2],'ro',x2[:,1],x2[:,2],'go')
for x in np.arange(-5, 5, 0.5):
for y in np.arange(-3, 3, 0.5):
if self.predict(np.array([x,y])) <=0.5:
plt.plot(x,y,'r+')
else:
plt.plot(x,y,'g+')
plt.legend(('Class 0','Class 1'))
if show:
plt.show()
elif feats == 2:
idx1 = np.where(y==1)[0][0]
x1 = x[:idx1, :]
x2 = x[idx1:, :]
for x in np.arange(x1.min(), x1.max(), 0.1):
for y in np.arange(x2.min(), x2.max(), 0.1):
if self.predict(np.array([x,y])) <=0.01:
plt.plot(x,y,'r+')
else:
plt.plot(x,y,'g+')
plt.plot(x1[:,1],'ro',x2[:,1],'go')
if show:
plt.show()
else:
print "More than 2 dimmensions",x.shape
# def plot_fitted(self):
# N, feats = self.X.shape
# if feats == 3:
# x1 = se
def __normalization(self, data):
"""Function normalizes the data
Normalization is done by subtracting the mean of each column from each column member
        and dividing by the column standard deviation.
Parameters:
===========
data : 2darray
the data array
Returns:
========
norms : 2darray
normalized values
"""
mean = data.mean(axis = 0)
variance = data.std(axis = 0)
return (data - mean) / variance
class mlogit(logit):
"""This is a multivariate variation of logit model
"""
def __init__(self, data, classes, labels=None):
"""See logit description"""
super(mlogit, self).__init__(data, classes, labels)
self.classesNo, classesIdx = np.unique(classes, return_inverse = True)
self.count_table = np.zeros([len(classes), len(self.classesNo)])
self.count_table[range(len(classes)), classesIdx] = 1.0
def fit(self, maxiter, reg = 0, initial_gues = None):
"""Fitting logit model for multiclass case"""
theta = np.zeros([self.featureNo + 1, len(self.classesNo)])
for i in range(len(self.classesNo)):
self.Y = self.count_table[:,i]
theta[:, i] = super(mlogit, self).fit(maxiter, reg = reg, initial_gues = initial_gues)
self.theta = theta
return theta
def predict(self, x, val=0.9):
"""Class prediction"""
x = np.insert(x, 0, 1)
z = np.dot(x, self.theta)
probs = super(mlogit, self)._sigmoid(z)
idx = np.argmax(probs)
if probs[idx] >= val:
return self.classesNo[idx]
else:
return None
def plot_features(self):
cn = len(self.classesNo)
idx = np.argsort(self.Y)
y = self.Y[idx]
x = self.X[idx,:]
classes = []
if x.shape[1] == 3:
for i in range(cn):
beg, end = np.where(y==i)[0][[0,-1]]
plt.plot(x[beg:end+1, 1], x[beg:end +1, 2],'o')
classes.append('Class'+str(i))
plt.legend(classes)
plt.show()
else:
print "More than 2 dimmesions"
#class diagnostics(object):
# def __init__(self, classifier_obj, division=[0.6, 0.2, 0.2]):
# self.obj = classifier_obj
# self.div = division
# self.N, self.ft = self.obj.dataNo, self.obj.featureNo
# self.cvNo = self.N * division[1]
# self.testNo = self.N * division[2]
# self.trainNo = self.N * division[0]
# def diagnose(self, iters, reg, odrer=1, val=0.9):
# idx = np.linspace(0, self.N-1, self.N)
# TP, FP, TN, FN
# train_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# cv_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# test_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# X = self.obj.X
# Y = self.obj.Y
# for i in xrange(iters):
# np.random.shuffle(idx)
# train_set = X[idx[:self.trainNo], :]
# cv_set = X[idx[self.trainNo:self.trainNo+self.cvNo], :]
# test_set = X[idx[self.trainNo+self.cvNo:], :]
# classes_train = Y[idx[:self.trainNo], :]
# classes_cv = Y[idx[self.trainNo:self.trainNo+self.cvNo], :]
# classes_test = Y[idx[self.trainNo+self.cvNo:], :]
# Training
# self.obj.X = train_set
# self.obj.Y = classes_train
# self.obj.fit(100)
# for j, row in enumerate(train_set):
# cl = self.obj.predict(row, val)
# if cl == classes_train[j]:
# train_ok['tp'] += 1
# elif cl is None:
# train_ok['fn'] += 1
# else:
# train_ok['fp'] += 1
# Crossvalidation
# for j, row in enumerate(cv_set):
# cl = self.obj.predict(row, val)
# if cl == classes_cv[j]:
# cv_ok['tp'] += 1
# elif cl in None:
# cv_ok['fn'] += 1
# else:
# cv_ok['fp'] += 1
# Test set
# for j, row in enumerate(test_set):
# cl = self.obj.predict(row, val)
# if cl == classes_test[j]:
# test_ok['tp'] += 1
# elif cl is None:
# test_ok['fn'] += 1
# else:
# test_ok['fp'] += 1
# def power_set(self, lst, l):
# """Create a powerset of a list for given length"""
# r = [[]]
# for e in lst:
# r.extend([s + [e] for s in r])
# return set([j for j in r if len(j) <= l])
# def next_order(self, kernel, next_o):
# def make_order(self, p):
# init_featsNo = self.featNo
| gpl-3.0 |
ccomb/OpenUpgrade | addons/hr_timesheet_sheet/wizard/__init__.py | 443 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_current
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vijayanandau/KnowledgeShare | makahiki/apps/widgets/bonus_points/migrations/0004_auto__add_field_bonuspoint_claim_date__add_field_bonuspoint_create_dat.py | 7 | 4911 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BonusPoint.claim_date'
db.add_column('bonus_points_bonuspoint', 'claim_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'BonusPoint.create_date'
db.add_column('bonus_points_bonuspoint', 'create_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2012, 8, 9)), keep_default=False)
def backwards(self, orm):
# Deleting field 'BonusPoint.claim_date'
db.delete_column('bonus_points_bonuspoint', 'claim_date')
# Deleting field 'BonusPoint.create_date'
db.delete_column('bonus_points_bonuspoint', 'create_date')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 8, 9, 12, 8, 23, 650541)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 8, 9, 12, 8, 23, 650386)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bonus_points.bonuspoint': {
'Meta': {'object_name': 'BonusPoint'},
'claim_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'create_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2012, 8, 9)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'point_value': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bonus_points']
| mit |
cslosiu/TweetList | oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py | 35 | 8389 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class ResourceOwnerPasswordCredentialsGrant(GrantTypeBase):
"""`Resource Owner Password Credentials Grant`_
The resource owner password credentials grant type is suitable in
cases where the resource owner has a trust relationship with the
client, such as the device operating system or a highly privileged
application. The authorization server should take special care when
enabling this grant type and only allow it when other flows are not
viable.
This grant type is suitable for clients capable of obtaining the
resource owner's credentials (username and password, typically using
an interactive form). It is also used to migrate existing clients
using direct authentication schemes such as HTTP Basic or Digest
authentication to OAuth by converting the stored credentials to an
access token::
    +----------+
    | Resource |
    |  Owner   |
    |          |
    +----------+
         v
         |    Resource Owner
        (A) Password Credentials
         |
         v
    +---------+                                  +---------------+
    |         |>--(B)---- Resource Owner ------->|               |
    |         |         Password Credentials     | Authorization |
    | Client  |                                  |     Server    |
    |         |<--(C)---- Access Token ---------<|               |
    |         |    (w/ Optional Refresh Token)   |               |
    +---------+                                  +---------------+
            Figure 5: Resource Owner Password Credentials Flow
The flow illustrated in Figure 5 includes the following steps:
(A) The resource owner provides the client with its username and
password.
(B) The client requests an access token from the authorization
server's token endpoint by including the credentials received
from the resource owner. When making the request, the client
authenticates with the authorization server.
(C) The authorization server authenticates the client and validates
the resource owner credentials, and if valid, issues an access
token.
.. _`Resource Owner Password Credentials Grant`: http://tools.ietf.org/html/rfc6749#section-4.3
"""
def __init__(self, request_validator=None, refresh_token=True):
"""
If the refresh_token keyword argument is False, do not return
a refresh token in the response.
"""
self.request_validator = request_validator or RequestValidator()
self.refresh_token = refresh_token
def create_token_response(self, request, token_handler):
"""Return token or error in json format.
If the access token request is valid and authorized, the
authorization server issues an access token and optional refresh
token as described in `Section 5.1`_. If the request failed client
authentication or is invalid, the authorization server returns an
error response as described in `Section 5.2`_.
.. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
try:
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
log.debug('Validating access token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request, %s.', e)
return headers, e.json, e.status_code
token = token_handler.create_token(request, self.refresh_token)
log.debug('Issuing token %r to client id %r (%r) and username %s.',
token, request.client_id, request.client, request.username)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
"""
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per Appendix B with a character encoding of UTF-8 in the HTTP
request entity-body:
grant_type
REQUIRED. Value MUST be set to "password".
username
REQUIRED. The resource owner username.
password
REQUIRED. The resource owner password.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
If the client type is confidential or the client was issued client
credentials (or assigned other authentication requirements), the
client MUST authenticate with the authorization server as described
in `Section 3.2.1`_.
The authorization server MUST:
o require client authentication for confidential clients or for any
client that was issued client credentials (or with other
authentication requirements),
o authenticate the client if client authentication is included, and
o validate the resource owner password credentials using its
existing password validation algorithm.
Since this access token request utilizes the resource owner's
password, the authorization server MUST protect the endpoint against
brute force attacks (e.g., using rate-limitation or generating
alerts).
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
"""
for param in ('grant_type', 'username', 'password'):
if not getattr(request, param, None):
raise errors.InvalidRequestError(
'Request is missing %s parameter.' % param, request=request)
for param in ('grant_type', 'username', 'password', 'scope'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param, request=request)
# This error should rarely (if ever) occur if requests are routed to
# grant type handlers based on the grant_type parameter.
if not request.grant_type == 'password':
raise errors.UnsupportedGrantTypeError(request=request)
log.debug('Validating username %s.', request.username)
if not self.request_validator.validate_user(request.username,
request.password, request.client, request):
raise errors.InvalidGrantError(
'Invalid credentials given.', request=request)
else:
if not hasattr(request.client, 'client_id'):
raise NotImplementedError(
'Validate user must set the '
'request.client.client_id attribute '
'in authenticate_client.')
log.debug('Authorizing access to user %r.', request.user)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
if request.client:
request.client_id = request.client_id or request.client.client_id
self.validate_scopes(request)
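# Illustrative wiring sketch (not part of the original module; MyValidator is a
# hypothetical RequestValidator subclass and the URL/credentials are placeholders):
#   from oauthlib.oauth2 import LegacyApplicationServer
#   server = LegacyApplicationServer(MyValidator())
#   headers, body, status = server.create_token_response(
#       'https://provider.example/token', http_method='POST',
#       body='grant_type=password&username=alice&password=secret')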
| apache-2.0 |
koorukuroo/networkx_for_unicode | build/lib/networkx/algorithms/shortest_paths/tests/test_astar.py | 76 | 4927 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
from random import random, choice
class TestAStar:
def setUp(self):
self.XG=nx.DiGraph()
self.XG.add_edges_from([('s','u',{'weight':10}),
('s','x',{'weight':5}),
('u','v',{'weight':1}),
('u','x',{'weight':2}),
('v','y',{'weight':1}),
('x','u',{'weight':3}),
('x','v',{'weight':5}),
('x','y',{'weight':2}),
('y','s',{'weight':7}),
('y','v',{'weight':6})])
def test_random_graph(self):
def dist(a, b):
(x1, y1) = a
(x2, y2) = b
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
G = nx.Graph()
points = [(random(), random()) for _ in range(100)]
# Build a path from points[0] to points[-1] to be sure it exists
for p1, p2 in zip(points[:-1], points[1:]):
G.add_edge(p1, p2, weight=dist(p1, p2))
# Add other random edges
for _ in range(100):
p1, p2 = choice(points), choice(points)
G.add_edge(p1, p2, weight=dist(p1, p2))
path = nx.astar_path(G, points[0], points[-1], dist)
assert path == nx.dijkstra_path(G, points[0], points[-1])
def test_astar_directed(self):
assert nx.astar_path(self.XG,'s','v')==['s', 'x', 'u', 'v']
assert nx.astar_path_length(self.XG,'s','v')==9
def test_astar_multigraph(self):
G=nx.MultiDiGraph(self.XG)
assert_raises((TypeError,nx.NetworkXError),
nx.astar_path, [G,'s','v'])
assert_raises((TypeError,nx.NetworkXError),
nx.astar_path_length, [G,'s','v'])
def test_astar_undirected(self):
GG=self.XG.to_undirected()
# make sure we get lower weight
# to_undirected might choose either edge with weight 2 or weight 3
GG['u']['x']['weight']=2
GG['y']['v']['weight'] = 2
assert_equal(nx.astar_path(GG,'s','v'),['s', 'x', 'u', 'v'])
assert_equal(nx.astar_path_length(GG,'s','v'),8)
def test_astar_directed2(self):
XG2=nx.DiGraph()
XG2.add_edges_from([[1,4,{'weight':1}],
[4,5,{'weight':1}],
[5,6,{'weight':1}],
[6,3,{'weight':1}],
[1,3,{'weight':50}],
[1,2,{'weight':100}],
[2,3,{'weight':100}]])
assert nx.astar_path(XG2,1,3)==[1, 4, 5, 6, 3]
def test_astar_undirected2(self):
XG3=nx.Graph()
XG3.add_edges_from([ [0,1,{'weight':2}],
[1,2,{'weight':12}],
[2,3,{'weight':1}],
[3,4,{'weight':5}],
[4,5,{'weight':1}],
[5,0,{'weight':10}] ])
assert nx.astar_path(XG3,0,3)==[0, 1, 2, 3]
assert nx.astar_path_length(XG3,0,3)==15
def test_astar_undirected3(self):
XG4=nx.Graph()
XG4.add_edges_from([ [0,1,{'weight':2}],
[1,2,{'weight':2}],
[2,3,{'weight':1}],
[3,4,{'weight':1}],
[4,5,{'weight':1}],
[5,6,{'weight':1}],
[6,7,{'weight':1}],
[7,0,{'weight':1}] ])
assert nx.astar_path(XG4,0,2)==[0, 1, 2]
assert nx.astar_path_length(XG4,0,2)==4
# >>> MXG4=NX.MultiGraph(XG4)
# >>> MXG4.add_edge(0,1,3)
# >>> NX.dijkstra_path(MXG4,0,2)
# [0, 1, 2]
def test_astar_w1(self):
G=nx.DiGraph()
G.add_edges_from([('s','u'), ('s','x'), ('u','v'), ('u','x'),
('v','y'), ('x','u'), ('x','w'), ('w', 'v'), ('x','y'),
('y','s'), ('y','v')])
assert nx.astar_path(G,'s','v')==['s', 'u', 'v']
assert nx.astar_path_length(G,'s','v')== 2
@raises(nx.NetworkXNoPath)
def test_astar_nopath(self):
p = nx.astar_path(self.XG,'s','moon')
def test_cycle(self):
C=nx.cycle_graph(7)
assert nx.astar_path(C,0,3)==[0, 1, 2, 3]
assert nx.dijkstra_path(C,0,4)==[0, 6, 5, 4]
def test_orderable(self):
class UnorderableClass: pass
node_1 = UnorderableClass()
node_2 = UnorderableClass()
node_3 = UnorderableClass()
node_4 = UnorderableClass()
G = nx.Graph()
G.add_edge(node_1, node_2)
G.add_edge(node_1, node_3)
G.add_edge(node_2, node_4)
G.add_edge(node_3, node_4)
path=nx.algorithms.shortest_paths.astar.astar_path(G, node_1, node_4)
| bsd-3-clause |
baylee/django | tests/defer/tests.py | 25 | 11390 | from __future__ import unicode_literals
from django.db.models.query_utils import InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
def assert_delayed(self, obj, num):
"""
Instances with deferred fields look the same as normal instances when
we examine attribute values. Therefore, this method returns the number
of deferred fields on returned instances.
"""
count = len(obj.get_deferred_fields())
self.assertEqual(count, num)
class DeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
def test_defer(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.defer("related__first")[0], 0)
self.assert_delayed(qs.defer("name").defer("value")[0], 2)
def test_only(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name")[0], 2)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
self.assert_delayed(qs.only("name").only("value")[0], 2)
self.assert_delayed(qs.only("related__first")[0], 2)
# Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key, see #15494
self.assert_delayed(qs.only("pk")[0], 3)
# You can use 'pk' with reverse foreign key lookups.
        # The related_id is always set even if it's not fetched from the DB,
# so pk and related_id are not deferred.
self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 2)
def test_defer_only_chaining(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
def test_defer_on_an_already_deferred_field(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("name")[0], 1)
def test_defer_none_to_clear_deferred_set(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name", "value")[0], 2)
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
def test_only_none_raises_error(self):
msg = 'Cannot pass None as an argument to only().'
with self.assertRaisesMessage(TypeError, msg):
Primary.objects.only(None)
def test_defer_extra(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
def test_defer_values_does_not_defer(self):
        # Using values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(Primary.objects.defer("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_only_values_does_not_defer(self):
self.assertEqual(Primary.objects.only("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_get(self):
# Using defer() and only() with get() is also valid.
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
def test_defer_with_select_related(self):
obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
self.assert_delayed(obj.related, 2)
self.assert_delayed(obj, 0)
def test_only_with_select_related(self):
obj = Primary.objects.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assert_delayed(obj.related, 1)
self.assertEqual(obj.related_id, self.s1.pk)
self.assertEqual(obj.name, "p1")
def test_defer_select_related_raises_invalid_query(self):
msg = (
'Field Primary.related cannot be both deferred and traversed '
'using select_related at the same time.'
)
with self.assertRaisesMessage(InvalidQuery, msg):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
msg = (
'Field Primary.related cannot be both deferred and traversed using '
'select_related at the same time.'
)
with self.assertRaisesMessage(InvalidQuery, msg):
Primary.objects.only("name").select_related("related")[0]
def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
# select_related() overrides defer().
with self.assertNumQueries(1):
obj = Primary.objects.defer("related").select_related()[0]
self.assert_delayed(obj, 1)
self.assertEqual(obj.related.id, self.s1.pk)
def test_saving_object_with_deferred_field(self):
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
Primary.objects.create(name="p2", value="xy", related=self.s1)
obj = Primary.objects.defer("value").get(name="p2")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(), [
"p1", "a new name",
],
lambda p: p.name,
ordered=False,
)
def test_defer_baseclass_when_subclass_has_no_added_fields(self):
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=self.s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Child.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_only_baseclass_when_subclass_has_no_added_fields(self):
# You can retrieve a single column on a base class with no fields
Child.objects.create(name="c1", value="foo", related=self.s1)
obj = Child.objects.only("name").get(name="c1")
# on an inherited model, its PK is also fetched, hence '3' deferred fields.
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
def test_defer_baseclass_when_subclass_has_added_field(self):
# You can defer a field on a baseclass
obj = BigChild.objects.defer("value").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass(self):
# You can defer a field on a subclass
obj = BigChild.objects.defer("other").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_only_baseclass_when_subclass_has_added_field(self):
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b1")
        # on an inherited model, its PK is also fetched, hence '4' deferred fields.
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
    def test_only_subclass(self):
# You can retrieve a single field on a subclass
obj = BigChild.objects.only("other").get(name="b1")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
def test_defer_proxy(self):
"""
Ensure select_related together with only on a proxy model behaves
as expected. See #17876.
"""
related = Secondary.objects.create(first='x1', second='x2')
ChildProxy.objects.create(name='p1', value='xx', related=related)
children = ChildProxy.objects.all().select_related().only('id', 'name')
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 2)
self.assertEqual(child.name, 'p1')
self.assertEqual(child.value, 'xx')
def test_defer_inheritance_pk_chaining(self):
"""
When an inherited model is fetched from the DB, its PK is also fetched.
When getting the PK of the parent model it is useful to use the already
fetched parent model PK if it happens to be available. Tests that this
is done.
"""
s1 = Secondary.objects.create(first="x1", second="y1")
bc = BigChild.objects.create(name="b1", value="foo", related=s1,
other="bar")
bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
with self.assertNumQueries(0):
bc_deferred.id
self.assertEqual(bc_deferred.pk, bc_deferred.id)
def test_eq(self):
s1 = Secondary.objects.create(first="x1", second="y1")
s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
self.assertEqual(s1, s1_defer)
self.assertEqual(s1_defer, s1)
def test_refresh_not_loading_deferred_fields(self):
s = Secondary.objects.create()
rf = Primary.objects.create(name='foo', value='bar', related=s)
rf2 = Primary.objects.only('related', 'value').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
rf2.refresh_from_db()
self.assertEqual(rf2.value, 'new bar')
with self.assertNumQueries(1):
self.assertEqual(rf2.name, 'new foo')
def test_custom_refresh_on_deferred_loading(self):
s = Secondary.objects.create()
rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
rf2 = RefreshPrimaryProxy.objects.only('related').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
# Customized refresh_from_db() reloads all deferred fields on
# access of any of them.
self.assertEqual(rf2.name, 'new foo')
self.assertEqual(rf2.value, 'new bar')
| bsd-3-clause |
pmarques/ansible | test/units/module_utils/urls/test_urls.py | 74 | 4040 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils import urls
from ansible.module_utils._text import to_native
import pytest
def test_build_ssl_validation_error(mocker):
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=False)
mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=False)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
assert 'python >= 2.7.9' in to_native(excinfo.value)
assert 'the python executable used' in to_native(excinfo.value)
assert 'urllib3' in to_native(excinfo.value)
assert 'python >= 2.6' in to_native(excinfo.value)
assert 'validate_certs=False' in to_native(excinfo.value)
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
assert 'validate_certs=False' in to_native(excinfo.value)
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=False)
mocker.patch.object(urls, 'HAS_URLLIB3_PYOPENSSLCONTEXT', new=True)
mocker.patch.object(urls, 'HAS_URLLIB3_SSL_WRAP_SOCKET', new=True)
mocker.patch.object(urls, 'HAS_SSLCONTEXT', new=True)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc=None)
assert 'urllib3' not in to_native(excinfo.value)
with pytest.raises(urls.SSLValidationError) as excinfo:
urls.build_ssl_validation_error('hostname', 'port', 'paths', exc='BOOM')
assert 'BOOM' in to_native(excinfo.value)
def test_maybe_add_ssl_handler(mocker):
mocker.patch.object(urls, 'HAS_SSL', new=False)
with pytest.raises(urls.NoSSLError):
urls.maybe_add_ssl_handler('https://ansible.com/', True)
mocker.patch.object(urls, 'HAS_SSL', new=True)
url = 'https://user:[email protected]/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 443
url = 'https://ansible.com:4433/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 4433
url = 'https://user:[email protected]:4433/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 4433
url = 'https://ansible.com/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == 'ansible.com'
assert handler.port == 443
url = 'http://ansible.com/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler is None
url = 'https://[2a00:16d8:0:7::205]:4443/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == '2a00:16d8:0:7::205'
assert handler.port == 4443
url = 'https://[2a00:16d8:0:7::205]/'
handler = urls.maybe_add_ssl_handler(url, True)
assert handler.hostname == '2a00:16d8:0:7::205'
assert handler.port == 443
def test_basic_auth_header():
header = urls.basic_auth_header('user', 'passwd')
assert header == b'Basic dXNlcjpwYXNzd2Q='
def test_ParseResultDottedDict():
url = 'https://ansible.com/blog'
parts = urls.urlparse(url)
dotted_parts = urls.ParseResultDottedDict(parts._asdict())
assert parts[0] == dotted_parts.scheme
assert dotted_parts.as_list() == list(parts)
def test_unix_socket_patch_httpconnection_connect(mocker):
unix_conn = mocker.patch.object(urls.UnixHTTPConnection, 'connect')
conn = urls.httplib.HTTPConnection('ansible.com')
with urls.unix_socket_patch_httpconnection_connect():
conn.connect()
assert unix_conn.call_count == 1
| gpl-3.0 |
shlee89/athena | tools/test/topos/tower.py | 39 | 1460 | #!/usr/bin/env python
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.node import RemoteController, OVSKernelSwitch
from mininet.log import setLogLevel
class TowerTopo( Topo ):
"""Create a tower topology"""
def build( self, k=4, h=6 ):
spines = []
leaves = []
hosts = []
# Create the two spine switches
spines.append(self.addSwitch('s1'))
spines.append(self.addSwitch('s2'))
# Create two links between the spine switches
self.addLink(spines[0], spines[1])
#TODO add second link between spines when multi-link topos are supported
#self.addLink(spines[0], spines[1])
# Now create the leaf switches, their hosts and connect them together
i = 1
c = 0
while i <= k:
leaves.append(self.addSwitch('s1%d' % i))
for spine in spines:
self.addLink(leaves[i-1], spine)
j = 1
while j <= h:
hosts.append(self.addHost('h%d%d' % (i, j)))
self.addLink(hosts[c], leaves[i-1])
j+=1
c+=1
i+=1
topos = { 'tower': TowerTopo }
def run():
topo = TowerTopo()
net = Mininet( topo=topo, controller=RemoteController, autoSetMacs=True )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
run()
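# Alternatively (illustrative note; assumes a reachable remote controller), the
# same topology can be launched through the Mininet CLI wrapper:
#   sudo mn --custom tower.py --topo tower --controller remote,ip=<controller-ip>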
| apache-2.0 |
aonotas/chainer | chainer/functions/array/select_item.py | 3 | 3414 | import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class SelectItem(function_node.FunctionNode):
"""Select elements stored in given indices."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
t_type.dtype.kind == 'i',
x_type.ndim == 2,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
)
def forward(self, inputs):
self.retain_inputs((1,))
x, t = inputs
self._in_shape = x.shape
self._in_dtype = x.dtype
if chainer.is_debug():
if not ((0 <= t).all() and
(t < x.shape[1]).all()):
                msg = 'Each label `t` needs to satisfy `0 <= t < x.shape[1]`'
raise ValueError(msg)
xp = cuda.get_array_module(x)
if xp is numpy:
# This code is equivalent to `t.choose(x.T)`, but `numpy.choose`
# does not work when `x.shape[1] > 32`.
return x[six.moves.range(t.size), t],
else:
y = cuda.elementwise(
'S t, raw T x',
'T y',
'int ind[] = {i, t}; y = x[ind];',
'getitem_fwd'
)(t, x)
return y,
def backward(self, indexes, gy):
t = self.get_retained_inputs()[0]
ret = []
if 0 in indexes:
gx = Assign(self._in_shape, self._in_dtype, t).apply(gy)[0]
ret.append(gx)
if 1 in indexes:
ret.append(None)
return ret
class Assign(function_node.FunctionNode):
def __init__(self, shape, dtype, t):
self.shape = shape
self.dtype = dtype
self.t = t.data
def forward_cpu(self, inputs):
gx = numpy.zeros(self.shape, self.dtype)
gx[six.moves.range(self.t.size), self.t] = inputs[0]
return gx,
def forward_gpu(self, inputs):
gx = cuda.cupy.zeros(self.shape, self.dtype)
gx = cuda.elementwise(
'S t, T gloss',
'raw T gx',
'int ind[] = {i, t}; gx[ind] = gloss;',
'getitem_bwd'
)(self.t, inputs[0], gx)
return gx,
def backward(self, indexes, gy):
return SelectItem().apply((gy[0], self.t))
def select_item(x, t):
"""Select elements stored in given indices.
This function returns ``t.choose(x.T)``, that means
``y[i] == x[i, t[i]]`` for all ``i``.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Variable storing arrays. A two-dimensional float array.
t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Variable storing index numbers. A one-dimensional int array.
Length of the ``t`` should be equal to ``x.shape[0]``.
Returns:
~chainer.Variable: Variable that holds ``t``-th element of ``x``.
.. admonition:: Example
>>> x = np.array([[0, 1, 2], [3, 4, 5]], np.float32)
>>> t = np.array([0, 2], np.int32)
>>> y = F.select_item(x, t)
>>> y.shape
(2,)
>>> y.data
array([0., 5.], dtype=float32)
"""
return SelectItem().apply((x, t))[0]
| mit |
jeremiahyan/odoo | odoo/addons/base/populate/res_company.py | 1 | 1638 | import collections
import logging
from odoo import models, Command
from odoo.tools import populate
_logger = logging.getLogger(__name__)
class Partner(models.Model):
_inherit = "res.company"
_populate_sizes = {
'small': 5,
'medium': 10,
'large': 50,
}
def _populate_factories(self):
# remaining: paperformat_id, parent_id, partner_id, favicon, font, report_header, external_report_layout_id, report_footer
def get_name(values=None, counter=0, **kwargs):
return 'company_%s_%s' % (counter, self.env['res.currency'].browse(values['currency_id']).name)
active_currencies = self.env['res.currency'].search([('active', '=', True)]).ids
return [
('name', populate.constant('company_{counter}')),
('sequence', populate.randint(0, 100)),
('company_registry', populate.iterate([False, 'company_registry_{counter}'])),
('base_onboarding_company_state', populate.iterate(
[False] + [e[0] for e in type(self).base_onboarding_company_state.selection])),
('primary_color', populate.iterate([False, '', '#ff7755'])),
('secondary_color', populate.iterate([False, '', '#ffff55'], seed='primary_color')),
('currency_id', populate.iterate(active_currencies)),
('name', populate.compute(get_name)),
]
def _populate(self, size):
records = super()._populate(size)
self.env.ref('base.user_admin').write({'company_ids': [Command.link(rec.id) for rec in records]}) # add all created companies on user admin
return records
| gpl-3.0 |
alperyeg/elephant | elephant/test/test_spike_train_generation.py | 1 | 42885 | # -*- coding: utf-8 -*-
"""
Unit tests for the spike_train_generation module.
:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import os
import sys
import unittest
import warnings
import neo
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import quantities as pq
from scipy.stats import expon
from scipy.stats import kstest, poisson
import elephant.spike_train_generation as stgen
from elephant.statistics import isi, instantaneous_rate
from elephant import kernels
python_version_major = sys.version_info.major
def pdiff(a, b):
"""Difference between a and b as a fraction of a
i.e. abs((a - b)/a)
"""
return abs((a - b) / a)
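# Illustrative example (added for clarity, not part of the upstream test
# module): pdiff measures relative deviation of b from a, e.g.
#
#   pdiff(10.0, 9.0)   # -> 0.1, i.e. b differs from a by 10% of a
#   pdiff(100, 125)    # -> 0.25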
class AnalogSignalThresholdDetectionTestCase(unittest.TestCase):
def setUp(self):
# Load membrane potential simulated using Brian2
# according to make_spike_extraction_test_data.py.
curr_dir = os.path.dirname(os.path.realpath(__file__))
raw_data_file_loc = os.path.join(
curr_dir, 'spike_extraction_test_data.txt')
raw_data = []
with open(raw_data_file_loc, 'r') as f:
for x in f.readlines():
raw_data.append(float(x))
self.vm = neo.AnalogSignal(
raw_data, units=pq.V, sampling_period=0.1 * pq.ms)
self.true_time_stamps = [0.0123, 0.0354, 0.0712, 0.1191, 0.1694,
0.2200, 0.2711] * pq.s
def test_threshold_detection(self):
# Test whether spikes are extracted at the correct times from
# an analog signal.
spike_train = stgen.threshold_detection(self.vm)
try:
len(spike_train)
# Handles an error in Neo related to some zero length
# spike trains being treated as unsized objects.
except TypeError:
warnings.warn(
("The spike train may be an unsized object. This may be"
" related to an issue in Neo with some zero-length SpikeTrain"
" objects. Bypassing this by creating an empty SpikeTrain"
" object."))
spike_train = neo.SpikeTrain([], t_start=spike_train.t_start,
t_stop=spike_train.t_stop,
units=spike_train.units)
# Does threshold_detection gives the correct number of spikes?
self.assertEqual(len(spike_train), len(self.true_time_stamps))
# Does threshold_detection gives the correct times for the spikes?
try:
assert_array_almost_equal(spike_train, self.true_time_stamps)
except AttributeError: # If numpy version too old to have allclose
self.assertTrue(np.array_equal(spike_train, self.true_time_stamps))
def test_peak_detection_threshold(self):
# Test for empty SpikeTrain when threshold is too high
result = stgen.threshold_detection(self.vm, threshold=30 * pq.mV)
self.assertEqual(len(result), 0)
class AnalogSignalPeakDetectionTestCase(unittest.TestCase):
def setUp(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
raw_data_file_loc = os.path.join(
curr_dir, 'spike_extraction_test_data.txt')
raw_data = []
with open(raw_data_file_loc, 'r') as f:
for x in f.readlines():
raw_data.append(float(x))
self.vm = neo.AnalogSignal(
raw_data, units=pq.V, sampling_period=0.1 * pq.ms)
self.true_time_stamps = [0.0124, 0.0354, 0.0713, 0.1192, 0.1695,
0.2201, 0.2711] * pq.s
def test_peak_detection_time_stamps(self):
# Test with default arguments
result = stgen.peak_detection(self.vm)
self.assertEqual(len(self.true_time_stamps), len(result))
self.assertIsInstance(result, neo.core.SpikeTrain)
try:
assert_array_almost_equal(result, self.true_time_stamps)
except AttributeError:
self.assertTrue(np.array_equal(result, self.true_time_stamps))
def test_peak_detection_threshold(self):
# Test for empty SpikeTrain when threshold is too high
result = stgen.peak_detection(self.vm, threshold=30 * pq.mV)
self.assertEqual(len(result), 0)
class AnalogSignalSpikeExtractionTestCase(unittest.TestCase):
def setUp(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
raw_data_file_loc = os.path.join(
curr_dir, 'spike_extraction_test_data.txt')
raw_data = []
with open(raw_data_file_loc, 'r') as f:
for x in f.readlines():
raw_data.append(float(x))
self.vm = neo.AnalogSignal(
raw_data, units=pq.V, sampling_period=0.1 * pq.ms)
self.first_spike = np.array([-0.04084546, -0.03892033, -0.03664779,
-0.03392689, -0.03061474, -0.02650277,
-0.0212756, -0.01443531, -0.00515365,
0.00803962, 0.02797951, -0.07,
-0.06974495, -0.06950466, -0.06927778,
-0.06906314, -0.06885969, -0.06866651,
-0.06848277, -0.06830773, -0.06814071,
-0.06798113, -0.06782843, -0.06768213,
-0.06754178, -0.06740699, -0.06727737,
-0.06715259, -0.06703235, -0.06691635])
def test_spike_extraction_waveform(self):
spike_train = stgen.spike_extraction(self.vm.reshape(-1),
interval=(-1 * pq.ms, 2 * pq.ms))
try:
assert_array_almost_equal(
spike_train.waveforms[0][0].magnitude.reshape(-1),
self.first_spike)
except AttributeError:
self.assertTrue(
np.array_equal(spike_train.waveforms[0][0].magnitude,
self.first_spike))
class HomogeneousPoissonProcessTestCase(unittest.TestCase):
def test_statistics(self):
# This is a statistical test that has a non-zero chance of failure
# during normal operation. Thus, we set the random seed to a value that
# creates a realization passing the test.
for rate in [123.0 * pq.Hz, 0.123 * pq.kHz]:
for t_stop in [2345 * pq.ms, 2.345 * pq.s]:
# zero refractory period should act as no refractory period
for refractory_period in (None, 0 * pq.ms):
np.random.seed(seed=12345)
spiketrain = stgen.homogeneous_poisson_process(
rate, t_stop=t_stop,
refractory_period=refractory_period)
intervals = isi(spiketrain)
expected_mean_isi = 1. / rate.simplified
self.assertAlmostEqual(
expected_mean_isi.magnitude,
intervals.mean().simplified.magnitude,
places=3)
expected_first_spike = 0 * pq.ms
self.assertLess(
spiketrain[0] - expected_first_spike,
7 * expected_mean_isi)
expected_last_spike = t_stop
self.assertLess(expected_last_spike -
spiketrain[-1], 7 * expected_mean_isi)
# Kolmogorov-Smirnov test
D, p = kstest(
intervals.rescale(t_stop.units),
"expon",
# args are (loc, scale)
args=(0,
expected_mean_isi.rescale(t_stop.units)),
alternative='two-sided')
self.assertGreater(p, 0.001)
self.assertLess(D, 0.12)
def test_zero_refractory_period(self):
rate = 10 * pq.Hz
t_stop = 20 * pq.s
np.random.seed(27)
sp1 = stgen.homogeneous_poisson_process(rate, t_stop=t_stop,
as_array=True)
np.random.seed(27)
sp2 = stgen.homogeneous_poisson_process(rate, t_stop=t_stop,
refractory_period=0 * pq.ms,
as_array=True)
assert_array_almost_equal(sp1, sp2)
def test_t_start_and_t_stop(self):
rate = 10 * pq.Hz
t_start = 17 * pq.ms
t_stop = 2 * pq.s
for refractory_period in (None, 3 * pq.ms):
spiketrain = stgen.homogeneous_poisson_process(
rate=rate, t_start=t_start, t_stop=t_stop,
refractory_period=refractory_period)
self.assertEqual(spiketrain.t_start, t_start)
self.assertEqual(spiketrain.t_stop, t_stop)
def test_zero_rate(self):
for refractory_period in (None, 3 * pq.ms):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
"""
Catch RuntimeWarning: divide by zero encountered in true_divide
mean_interval = 1 / rate.magnitude, when rate == 0 Hz.
"""
sp = stgen.homogeneous_poisson_process(
rate=0 * pq.Hz, t_stop=10 * pq.s,
refractory_period=refractory_period)
self.assertEqual(sp.size, 0)
def test_nondecrease_spike_times(self):
for refractory_period in (None, 3 * pq.ms):
np.random.seed(27)
spiketrain = stgen.homogeneous_poisson_process(
rate=10 * pq.Hz, t_stop=1000 * pq.s,
refractory_period=refractory_period)
diffs = np.diff(spiketrain.times)
self.assertTrue((diffs >= 0).all())
def test_compare_with_as_array(self):
rate = 10 * pq.Hz
t_stop = 10 * pq.s
for refractory_period in (None, 3 * pq.ms):
np.random.seed(27)
spiketrain = stgen.homogeneous_poisson_process(
rate=rate, t_stop=t_stop, refractory_period=refractory_period)
self.assertIsInstance(spiketrain, neo.SpikeTrain)
np.random.seed(27)
spiketrain_array = stgen.homogeneous_poisson_process(
rate=rate, t_stop=t_stop, refractory_period=refractory_period,
as_array=True)
# don't check with isinstance: Quantity is a subclass of np.ndarray
self.assertTrue(isinstance(spiketrain_array, np.ndarray))
assert_array_almost_equal(spiketrain.times.magnitude,
spiketrain_array)
def test_effective_rate_refractory_period(self):
np.random.seed(27)
rate_expected = 10 * pq.Hz
refractory_period = 90 * pq.ms # 10 ms of effective ISI
spiketrain = stgen.homogeneous_poisson_process(
rate_expected, t_stop=1000 * pq.s,
refractory_period=refractory_period)
rate_obtained = len(spiketrain) / spiketrain.t_stop
rate_obtained = rate_obtained.simplified
self.assertAlmostEqual(rate_expected.simplified,
rate_obtained.simplified, places=1)
intervals = isi(spiketrain)
isi_mean_expected = 1. / rate_expected
self.assertAlmostEqual(isi_mean_expected.simplified,
intervals.mean().simplified, places=3)
def test_invalid(self):
rate = 10 * pq.Hz
for refractory_period in (None, 3 * pq.ms):
# t_stop < t_start
hpp = stgen.homogeneous_poisson_process
self.assertRaises(
ValueError, hpp, rate=rate, t_start=5 * pq.ms,
t_stop=1 * pq.ms, refractory_period=refractory_period)
# no units provided for rate, t_stop
self.assertRaises(ValueError, hpp, rate=10,
refractory_period=refractory_period)
self.assertRaises(ValueError, hpp, rate=rate, t_stop=5,
refractory_period=refractory_period)
# no units provided for refractory_period
self.assertRaises(ValueError, hpp, rate=rate, refractory_period=2)
class InhomogeneousGammaTestCase(unittest.TestCase):
def setUp(self):
rate_list = [[20]] * 1000 + [[200]] * 1000
self.rate_profile = neo.AnalogSignal(
rate_list * pq.Hz, sampling_period=0.001 * pq.s)
rate_0 = [[0]] * 1000
self.rate_profile_0 = neo.AnalogSignal(
rate_0 * pq.Hz, sampling_period=0.001 * pq.s)
rate_negative = [[-1]] * 1000
self.rate_profile_negative = neo.AnalogSignal(
rate_negative * pq.Hz, sampling_period=0.001 * pq.s)
def test_statistics(self):
# This is a statistical test that has a non-zero chance of failure
# during normal operation. Thus, we set the random seed to a value that
# creates a realization passing the test.
np.random.seed(seed=12345)
shape_factor = 2.5
for rate in [self.rate_profile, self.rate_profile.rescale(pq.kHz)]:
spiketrain = stgen.inhomogeneous_gamma_process(
rate, shape_factor=shape_factor)
intervals = isi(spiketrain)
# Computing expected statistics and percentiles
expected_spike_count = (np.sum(
rate) * rate.sampling_period).simplified
percentile_count = poisson.ppf(.999, expected_spike_count)
expected_min_isi = (1 / np.min(rate))
expected_max_isi = (1 / np.max(rate))
percentile_min_isi = expon.ppf(.999, expected_min_isi)
percentile_max_isi = expon.ppf(.999, expected_max_isi)
# Testing (each should fail 1 every 1000 times)
self.assertLess(spiketrain.size, percentile_count)
self.assertLess(np.min(intervals), percentile_min_isi)
self.assertLess(np.max(intervals), percentile_max_isi)
# Testing t_start t_stop
self.assertEqual(rate.t_stop, spiketrain.t_stop)
self.assertEqual(rate.t_start, spiketrain.t_start)
# Testing type
spiketrain_as_array = stgen.inhomogeneous_gamma_process(
rate, shape_factor=shape_factor, as_array=True)
self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))
# check error if rate has wrong format
self.assertRaises(
ValueError, stgen.inhomogeneous_gamma_process,
rate=[0.1, 2.],
shape_factor=shape_factor)
# check error if negative values in rate
self.assertRaises(
ValueError, stgen.inhomogeneous_gamma_process,
rate=neo.AnalogSignal([-0.1, 10.] * pq.Hz,
sampling_period=0.001 * pq.s),
shape_factor=shape_factor)
# check error if rate is empty
self.assertRaises(
ValueError, stgen.inhomogeneous_gamma_process,
rate=neo.AnalogSignal([] * pq.Hz, sampling_period=0.001 * pq.s),
shape_factor=shape_factor)
def test_recovered_firing_rate_profile(self):
np.random.seed(54)
t_start = 0 * pq.s
t_stop = 4 * np.round(np.pi, decimals=3) * pq.s # 2 full periods
sampling_period = 0.001 * pq.s
# an arbitrary rate profile
profile = 0.5 * (1 + np.sin(np.arange(t_start.item(), t_stop.item(),
sampling_period.item())))
time_generation = 0
n_trials = 200
rtol = 0.05 # 5% of deviation allowed
kernel = kernels.RectangularKernel(sigma=0.25 * pq.s)
for rate in (10 * pq.Hz, 100 * pq.Hz):
rate_profile = neo.AnalogSignal(rate * profile,
sampling_period=sampling_period)
# the recovered firing rate profile should not depend on the
# shape factor; here we test float and integer values of the shape
# factor: the method supports float values that is not trivial
# for inhomogeneous gamma process generation
for shape_factor in (1, 2.5, 10.):
spiketrains = \
[stgen.inhomogeneous_gamma_process(
rate_profile, shape_factor=shape_factor)
for _ in range(n_trials)]
rate_recovered = instantaneous_rate(
spiketrains,
sampling_period=sampling_period,
kernel=kernel,
t_start=t_start,
t_stop=t_stop, trim=True) / n_trials
rate_recovered = rate_recovered.flatten().magnitude
trim = (rate_profile.shape[0] - rate_recovered.shape[0]) // 2
rate_profile_valid = rate_profile.magnitude.squeeze()
rate_profile_valid = rate_profile_valid[trim: -trim - 1]
assert_allclose(rate_recovered, rate_profile_valid,
rtol=0, atol=rtol * rate.item())
class InhomogeneousPoissonProcessTestCase(unittest.TestCase):
def setUp(self):
rate_list = [[20]] * 1000 + [[200]] * 1000
self.rate_profile = neo.AnalogSignal(
rate_list * pq.Hz, sampling_period=0.001 * pq.s)
rate_0 = [[0]] * 1000
self.rate_profile_0 = neo.AnalogSignal(
rate_0 * pq.Hz, sampling_period=0.001 * pq.s)
rate_negative = [[-1]] * 1000
self.rate_profile_negative = neo.AnalogSignal(
rate_negative * pq.Hz, sampling_period=0.001 * pq.s)
def test_statistics(self):
# This is a statistical test that has a non-zero chance of failure
# during normal operation. Thus, we set the random seed to a value that
# creates a realization passing the test.
np.random.seed(seed=12345)
for rate in (self.rate_profile, self.rate_profile.rescale(pq.kHz)):
for refractory_period in (3 * pq.ms, None):
spiketrain = stgen.inhomogeneous_poisson_process(
rate, refractory_period=refractory_period)
intervals = isi(spiketrain)
# Computing expected statistics and percentiles
expected_spike_count = (np.sum(
rate) * rate.sampling_period).simplified
percentile_count = poisson.ppf(.999, expected_spike_count)
expected_min_isi = (1 / np.min(rate))
expected_max_isi = (1 / np.max(rate))
percentile_min_isi = expon.ppf(.999, expected_min_isi)
percentile_max_isi = expon.ppf(.999, expected_max_isi)
# Check that minimal ISI is greater than the refractory_period
if refractory_period is not None:
self.assertGreater(np.min(intervals), refractory_period)
# Testing (each should fail 1 every 1000 times)
self.assertLess(spiketrain.size, percentile_count)
self.assertLess(np.min(intervals), percentile_min_isi)
self.assertLess(np.max(intervals), percentile_max_isi)
# Testing t_start t_stop
self.assertEqual(rate.t_stop, spiketrain.t_stop)
self.assertEqual(rate.t_start, spiketrain.t_start)
# Testing type
spiketrain_as_array = stgen.inhomogeneous_poisson_process(
rate, as_array=True)
self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))
# Testing type for refractory period
refractory_period = 3 * pq.ms
spiketrain = stgen.inhomogeneous_poisson_process(
rate, refractory_period=refractory_period)
spiketrain_as_array = stgen.inhomogeneous_poisson_process(
rate, as_array=True, refractory_period=refractory_period)
self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))
        # Check that a too-high refractory period raises an error
self.assertRaises(
ValueError, stgen.inhomogeneous_poisson_process,
self.rate_profile,
refractory_period=1000 * pq.ms)
def test_effective_rate_refractory_period(self):
np.random.seed(27)
rate_expected = 10 * pq.Hz
refractory_period = 90 * pq.ms # 10 ms of effective ISI
rates = neo.AnalogSignal(np.repeat(rate_expected, 1000), units=pq.Hz,
t_start=0 * pq.ms, sampling_rate=1 * pq.Hz)
spiketrain = stgen.inhomogeneous_poisson_process(
rates, refractory_period=refractory_period)
rate_obtained = len(spiketrain) / spiketrain.t_stop
self.assertAlmostEqual(rate_expected, rate_obtained.simplified,
places=1)
intervals_inhomo = isi(spiketrain)
isi_mean_expected = 1. / rate_expected
self.assertAlmostEqual(isi_mean_expected.simplified,
intervals_inhomo.mean().simplified,
places=3)
def test_zero_rate(self):
for refractory_period in (3 * pq.ms, None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
"""
Catch RuntimeWarning: divide by zero encountered in true_divide
mean_interval = 1 / rate.magnitude, when rate == 0 Hz.
"""
spiketrain = stgen.inhomogeneous_poisson_process(
self.rate_profile_0, refractory_period=refractory_period)
self.assertEqual(spiketrain.size, 0)
def test_negative_rates(self):
for refractory_period in (3 * pq.ms, None):
self.assertRaises(
ValueError, stgen.inhomogeneous_poisson_process,
self.rate_profile_negative,
refractory_period=refractory_period)
class HomogeneousGammaProcessTestCase(unittest.TestCase):
def test_statistics(self):
# This is a statistical test that has a non-zero chance of failure
# during normal operation. Thus, we set the random seed to a value that
# creates a realization passing the test.
np.random.seed(seed=12345)
a = 3.0
for b in (67.0 * pq.Hz, 0.067 * pq.kHz):
for t_stop in (2345 * pq.ms, 2.345 * pq.s):
spiketrain = stgen.homogeneous_gamma_process(
a, b, t_stop=t_stop)
intervals = isi(spiketrain)
expected_spike_count = int((b / a * t_stop).simplified)
# should fail about 1 time in 1000
self.assertLess(
pdiff(expected_spike_count, spiketrain.size), 0.25)
expected_mean_isi = (a / b).rescale(pq.ms)
self.assertLess(
pdiff(expected_mean_isi, intervals.mean()), 0.3)
expected_first_spike = 0 * pq.ms
self.assertLess(
spiketrain[0] - expected_first_spike,
4 * expected_mean_isi)
expected_last_spike = t_stop
self.assertLess(expected_last_spike -
spiketrain[-1], 4 * expected_mean_isi)
# Kolmogorov-Smirnov test
D, p = kstest(intervals.rescale(t_stop.units),
"gamma",
# args are (a, loc, scale)
args=(a, 0, (1 / b).rescale(t_stop.units)),
alternative='two-sided')
self.assertGreater(p, 0.001)
self.assertLess(D, 0.25)
def test_compare_with_as_array(self):
a = 3.
b = 10 * pq.Hz
np.random.seed(27)
spiketrain = stgen.homogeneous_gamma_process(a=a, b=b)
self.assertIsInstance(spiketrain, neo.SpikeTrain)
np.random.seed(27)
spiketrain_array = stgen.homogeneous_gamma_process(a=a, b=b,
as_array=True)
# don't check with isinstance: pq.Quantity is a subclass of np.ndarray
self.assertTrue(isinstance(spiketrain_array, np.ndarray))
assert_array_almost_equal(spiketrain.times.magnitude, spiketrain_array)
class _n_poisson_TestCase(unittest.TestCase):
def setUp(self):
self.n = 4
self.rate = 10 * pq.Hz
self.rates = np.arange(1, self.n + 1) * pq.Hz
self.t_stop = 10000 * pq.ms
def test_poisson(self):
# Check the output types for input rate + n number of neurons
pp = stgen._n_poisson(
rate=self.rate,
t_stop=self.t_stop,
n_spiketrains=self.n)
self.assertIsInstance(pp, list)
self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain)
self.assertEqual(pp[0].simplified.units, 1000 * pq.ms)
self.assertEqual(len(pp), self.n)
# Check the output types for input list of rates
pp = stgen._n_poisson(rate=self.rates, t_stop=self.t_stop)
self.assertIsInstance(pp, list)
self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain)
self.assertEqual(pp[0].simplified.units, 1000 * pq.ms)
self.assertEqual(len(pp), self.n)
def test_poisson_error(self):
# Dimensionless rate
self.assertRaises(
ValueError, stgen._n_poisson, rate=5, t_stop=self.t_stop)
# Negative rate
self.assertRaises(
ValueError, stgen._n_poisson, rate=-5 * pq.Hz, t_stop=self.t_stop)
# Negative value when rate is a list
self.assertRaises(
ValueError, stgen._n_poisson, rate=[-5, 3] * pq.Hz,
t_stop=self.t_stop)
# Negative n
self.assertRaises(
ValueError, stgen._n_poisson, rate=self.rate, t_stop=self.t_stop,
n_spiketrains=-1)
# t_start>t_stop
self.assertRaises(
ValueError, stgen._n_poisson, rate=self.rate, t_start=4 * pq.ms,
t_stop=3 * pq.ms, n_spiketrains=3)
class singleinteractionprocess_TestCase(unittest.TestCase):
def setUp(self):
self.n = 4
self.rate = 10 * pq.Hz
self.rates = np.arange(1, self.n + 1) * pq.Hz
self.t_stop = 10000 * pq.ms
self.rate_c = 1 * pq.Hz
def test_sip(self):
# Generate an example SIP mode
sip, coinc = stgen.single_interaction_process(
n_spiketrains=self.n, t_stop=self.t_stop, rate=self.rate,
coincidence_rate=self.rate_c, return_coincidences=True)
# Check the output types
self.assertEqual(type(sip), list)
self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(type(coinc[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(sip[0].simplified.units, 1000 * pq.ms)
self.assertEqual(coinc[0].simplified.units, 1000 * pq.ms)
# Check the output length
self.assertEqual(len(sip), self.n)
self.assertEqual(
len(coinc[0]), (self.rate_c * self.t_stop).simplified.magnitude)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
            # Generate an example SIP mode giving a list of rates as input
sip, coinc = stgen.single_interaction_process(
t_stop=self.t_stop, rate=self.rates,
coincidence_rate=self.rate_c, return_coincidences=True)
# Check the output types
self.assertEqual(type(sip), list)
self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(type(coinc[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(sip[0].simplified.units, 1000 * pq.ms)
self.assertEqual(coinc[0].simplified.units, 1000 * pq.ms)
# Check the output length
self.assertEqual(len(sip), self.n)
self.assertEqual(
len(coinc[0]),
(self.rate_c * self.t_stop).rescale(pq.dimensionless))
# Generate an example SIP mode stochastic number of coincidences
sip = stgen.single_interaction_process(
n_spiketrains=self.n,
t_stop=self.t_stop,
rate=self.rate,
coincidence_rate=self.rate_c,
coincidences='stochastic',
return_coincidences=False)
# Check the output types
self.assertEqual(type(sip), list)
self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
self.assertEqual(sip[0].simplified.units, 1000 * pq.ms)
def test_sip_error(self):
# Negative rate
self.assertRaises(
ValueError, stgen.single_interaction_process, n_spiketrains=self.n,
rate=-5 * pq.Hz,
coincidence_rate=self.rate_c, t_stop=self.t_stop)
# Negative coincidence rate
self.assertRaises(
ValueError, stgen.single_interaction_process, n_spiketrains=self.n,
rate=self.rate, coincidence_rate=-3 * pq.Hz, t_stop=self.t_stop)
# Negative value when rate is a list
self.assertRaises(
ValueError, stgen.single_interaction_process, n_spiketrains=self.n,
rate=[-5, 3, 4, 2] * pq.Hz, coincidence_rate=self.rate_c,
t_stop=self.t_stop)
# Negative n
self.assertRaises(
ValueError, stgen.single_interaction_process, n_spiketrains=-1,
rate=self.rate, coincidence_rate=self.rate_c, t_stop=self.t_stop)
# Rate_c < rate
self.assertRaises(
ValueError,
stgen.single_interaction_process,
n_spiketrains=self.n,
rate=self.rate,
coincidence_rate=self.rate + 1 * pq.Hz,
t_stop=self.t_stop)
class cppTestCase(unittest.TestCase):
def test_cpp_hom(self):
# testing output with generic inputs
amplitude_distribution = np.array([0, .9, .1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = 3 * pq.Hz
cpp_hom = stgen.cpp(rate, amplitude_distribution,
t_stop, t_start=t_start)
        # testing the output formats
self.assertEqual(
[type(train) for train in cpp_hom],
[neo.SpikeTrain] * len(cpp_hom))
self.assertEqual(cpp_hom[0].simplified.units, 1000 * pq.ms)
self.assertEqual(type(cpp_hom), list)
# testing quantities format of the output
self.assertEqual(
[train.simplified.units for train in cpp_hom],
[1000 * pq.ms] * len(cpp_hom))
# testing output t_start t_stop
for st in cpp_hom:
self.assertEqual(st.t_stop, t_stop)
self.assertEqual(st.t_start, t_start)
self.assertEqual(len(cpp_hom), len(amplitude_distribution) - 1)
# testing the units
t_stop = 10000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = 3 * pq.Hz
cpp_unit = stgen.cpp(rate, amplitude_distribution,
t_stop, t_start=t_start)
self.assertEqual(cpp_unit[0].units, t_stop.units)
self.assertEqual(cpp_unit[0].t_stop.units, t_stop.units)
self.assertEqual(cpp_unit[0].t_start.units, t_stop.units)
# testing output without copy of spikes
amplitude_distribution = np.array([1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = 3 * pq.Hz
cpp_hom_empty = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertEqual(
[len(train) for train in cpp_hom_empty], [0] * len(cpp_hom_empty))
# testing output with rate equal to 0
amplitude_distribution = np.array([0, .9, .1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = 0 * pq.Hz
cpp_hom_empty_r = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertEqual(
[len(train) for train in cpp_hom_empty_r], [0] * len(
cpp_hom_empty_r))
# testing output with same spike trains in output
amplitude_distribution = np.array([0., 0., 1.])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = 3 * pq.Hz
cpp_hom_eq = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertTrue(
np.allclose(cpp_hom_eq[0].magnitude, cpp_hom_eq[1].magnitude))
def test_cpp_hom_errors(self):
# testing raises of ValueError (wrong inputs)
# testing empty amplitude
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[],
t_stop=10 * 1000 * pq.ms,
rate=3 * pq.Hz)
# testing sum of amplitude>1
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[1, 1, 1],
t_stop=10 * 1000 * pq.ms,
rate=3 * pq.Hz)
# testing negative value in the amplitude
self.assertRaises(
ValueError, stgen.cpp, amplitude_distribution=[-1, 1, 1],
t_stop=10 * 1000 * pq.ms,
rate=3 * pq.Hz)
# test negative rate
with warnings.catch_warnings():
warnings.simplefilter("ignore")
"""
Catches RuntimeWarning: invalid value encountered in sqrt
number = np.ceil(n + 3 * np.sqrt(n)), when `n` == -3 Hz.
"""
self.assertRaises(
ValueError, stgen.cpp, amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=-3 * pq.Hz)
# test wrong unit for rate
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=3 * 1000 * pq.ms)
# testing raises of AttributeError (missing input units)
# Testing missing unit to t_stop
self.assertRaises(
ValueError, stgen.cpp, amplitude_distribution=[0, 1, 0], t_stop=10,
rate=3 * pq.Hz)
# Testing missing unit to t_start
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=3 * pq.Hz,
t_start=3)
# testing rate missing unit
self.assertRaises(
AttributeError, stgen.cpp, amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=3)
def test_cpp_het(self):
# testing output with generic inputs
amplitude_distribution = np.array([0, .9, .1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = [3, 4] * pq.Hz
with warnings.catch_warnings():
warnings.simplefilter("ignore")
"""
Catch RuntimeWarning: divide by zero encountered in true_divide
mean_interval = 1 / rate.magnitude, when rate == 0 Hz.
"""
cpp_het = stgen.cpp(rate, amplitude_distribution,
t_stop, t_start=t_start)
        # testing the output formats
self.assertEqual(
[type(train) for train in cpp_het],
[neo.SpikeTrain] * len(cpp_het))
self.assertEqual(cpp_het[0].simplified.units, 1000 * pq.ms)
self.assertEqual(type(cpp_het), list)
# testing units
self.assertEqual(
[train.simplified.units for train in cpp_het],
[1000 * pq.ms] * len(cpp_het))
# testing output t_start and t_stop
for st in cpp_het:
self.assertEqual(st.t_stop, t_stop)
self.assertEqual(st.t_start, t_start)
# testing the number of output spiketrains
self.assertEqual(len(cpp_het), len(amplitude_distribution) - 1)
self.assertEqual(len(cpp_het), len(rate))
# testing the units
t_stop = 10000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = [3, 4] * pq.Hz
cpp_unit = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertEqual(cpp_unit[0].units, t_stop.units)
self.assertEqual(cpp_unit[0].t_stop.units, t_stop.units)
self.assertEqual(cpp_unit[0].t_start.units, t_stop.units)
# testing without copying any spikes
amplitude_distribution = np.array([1, 0, 0])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = [3, 4] * pq.Hz
cpp_het_empty = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertEqual(len(cpp_het_empty[0]), 0)
# testing output with rate equal to 0
amplitude_distribution = np.array([0, .9, .1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = [0, 0] * pq.Hz
cpp_het_empty_r = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertEqual(
[len(train) for train in cpp_het_empty_r], [0] * len(
cpp_het_empty_r))
# testing completely sync spiketrains
amplitude_distribution = np.array([0, 0, 1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = [3, 3] * pq.Hz
cpp_het_eq = stgen.cpp(
rate, amplitude_distribution, t_stop, t_start=t_start)
self.assertTrue(np.allclose(
cpp_het_eq[0].magnitude, cpp_het_eq[1].magnitude))
def test_cpp_het_err(self):
# testing raises of ValueError (wrong inputs)
# testing empty amplitude
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz)
# testing sum amplitude>1
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[1, 1, 1],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz)
# testing amplitude negative value
self.assertRaises(
ValueError, stgen.cpp, amplitude_distribution=[-1, 1, 1],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz)
# testing negative rate
self.assertRaises(ValueError, stgen.cpp, amplitude_distribution=[
0, 1, 0], t_stop=10 * 1000 * pq.ms, rate=[-3, 4] * pq.Hz)
# testing empty rate
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=[] * pq.Hz)
# testing empty amplitude
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz)
# testing different len(A)-1 and len(rate)
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 1],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz)
# testing rate with different unit from Hz
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 1],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * 1000 * pq.ms)
        # Testing analytical constraint between amplitude and rate
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 0, 1],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz,
t_start=3)
# testing raises of AttributeError (missing input units)
# Testing missing unit to t_stop
self.assertRaises(
ValueError, stgen.cpp, amplitude_distribution=[0, 1, 0], t_stop=10,
rate=[3, 4] * pq.Hz)
# Testing missing unit to t_start
self.assertRaises(
ValueError,
stgen.cpp,
amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4] * pq.Hz,
t_start=3)
# Testing missing unit to rate
self.assertRaises(
AttributeError, stgen.cpp, amplitude_distribution=[0, 1, 0],
t_stop=10 * 1000 * pq.ms,
rate=[3, 4])
    def test_cpp_jittered(self):
# testing output with generic inputs
amplitude_distribution = np.array([0, .9, .1])
t_stop = 10 * 1000 * pq.ms
t_start = 5 * 1000 * pq.ms
rate = 3 * pq.Hz
cpp_shift = stgen.cpp(
rate,
amplitude_distribution,
t_stop,
t_start=t_start,
shift=3 * pq.ms)
        # testing the output formats
self.assertEqual(
[type(train) for train in cpp_shift], [neo.SpikeTrain] * len(
cpp_shift))
self.assertEqual(cpp_shift[0].simplified.units, 1000 * pq.ms)
self.assertEqual(type(cpp_shift), list)
# testing quantities format of the output
self.assertEqual(
[train.simplified.units for train in cpp_shift],
[1000 * pq.ms] * len(cpp_shift))
# testing output t_start t_stop
for spiketrain in cpp_shift:
self.assertEqual(spiketrain.t_stop, t_stop)
self.assertEqual(spiketrain.t_start, t_start)
self.assertEqual(len(cpp_shift), len(amplitude_distribution) - 1)
class HomogeneousPoissonProcessWithRefrPeriodTestCase(unittest.TestCase):
def test_invalid(self):
rate = 10 * pq.Hz
# t_stop < t_start
hpp = stgen.homogeneous_poisson_process
self.assertRaises(ValueError, hpp, rate=rate, t_start=5 * pq.ms,
t_stop=1 * pq.ms, refractory_period=3 * pq.ms)
# no units provided
self.assertRaises(ValueError, hpp, rate=10,
refractory_period=3 * pq.ms)
self.assertRaises(ValueError, hpp, rate=rate, t_stop=5,
refractory_period=3 * pq.ms)
self.assertRaises(ValueError, hpp, rate=rate, refractory_period=2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
hchen1202/django-react | virtualenv/lib/python3.6/site-packages/django/core/checks/registry.py | 114 | 3399 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.utils.itercompat import is_iterable
class Tags(object):
"""
Built-in tags for internal checks.
"""
admin = 'admin'
caches = 'caches'
compatibility = 'compatibility'
database = 'database'
models = 'models'
security = 'security'
signals = 'signals'
templates = 'templates'
urls = 'urls'
class CheckRegistry(object):
def __init__(self):
self.registered_checks = []
self.deployment_checks = []
def register(self, check=None, *tags, **kwargs):
"""
Can be used as a function or a decorator. Register given function
`f` labeled with given `tags`. The function should receive **kwargs
and return list of Errors and Warnings.
Example::
registry = CheckRegistry()
@registry.register('mytag', 'anothertag')
def my_check(apps, **kwargs):
# ... perform checks and collect `errors` ...
return errors
# or
registry.register(my_check, 'mytag', 'anothertag')
"""
kwargs.setdefault('deploy', False)
def inner(check):
check.tags = tags
if kwargs['deploy']:
if check not in self.deployment_checks:
self.deployment_checks.append(check)
elif check not in self.registered_checks:
self.registered_checks.append(check)
return check
if callable(check):
return inner(check)
else:
if check:
tags += (check, )
return inner
def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False):
"""
Run all registered checks and return list of Errors and Warnings.
"""
errors = []
checks = self.get_checks(include_deployment_checks)
if tags is not None:
checks = [check for check in checks
if hasattr(check, 'tags') and set(check.tags) & set(tags)]
else:
# By default, 'database'-tagged checks are not run as they do more
# than mere static code analysis.
checks = [check for check in checks
if not hasattr(check, 'tags') or Tags.database not in check.tags]
for check in checks:
new_errors = check(app_configs=app_configs)
assert is_iterable(new_errors), (
"The function %r did not return a list. All functions registered "
"with the checks registry must return a list." % check)
errors.extend(new_errors)
return errors
def tag_exists(self, tag, include_deployment_checks=False):
return tag in self.tags_available(include_deployment_checks)
def tags_available(self, deployment_checks=False):
return set(chain(*[check.tags for check in self.get_checks(deployment_checks) if hasattr(check, 'tags')]))
def get_checks(self, include_deployment_checks=False):
checks = list(self.registered_checks)
if include_deployment_checks:
checks.extend(self.deployment_checks)
return checks
registry = CheckRegistry()
register = registry.register
run_checks = registry.run_checks
tag_exists = registry.tag_exists
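# Illustrative usage sketch (not part of Django itself): registering a custom
# system check through the module-level helpers exposed above. The check name
# and its body are placeholders.
#
#   from django.core.checks import Error, Tags, register
#
#   @register(Tags.compatibility)
#   def example_check(app_configs, **kwargs):
#       errors = []
#       # ... inspect settings or app_configs and append Error(...) objects ...
#       return errors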
| mit |
sid24ss/distcc | include_server/parse_file.py | 24 | 12527 | #! /usr/bin/python2.4
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
"""A very fast directives-only parser for C and C++ source code.
We parse only the following directives:
#include (the standard C/C++ inclusion mechanism)
#include_next (a GNU C/C++ extension)
#import (an Objective-C feature, similar to #include)
#define (because #defines can affect the results of '#include MACRO')
"""
__author__ = 'Nils Klarlund'
import re
import time
import basics
import cache_basics
import statistics
Debug = basics.Debug
DEBUG_TRACE = basics.DEBUG_TRACE
DEBUG_TRACE2 = basics.DEBUG_TRACE2
NotCoveredError = basics.NotCoveredError
# For coarse and fast scanning
RE_INCLUDE_DEFINE = re.compile("include|define|import")
# For fine-grained, but slow backtracking, parsing
POUND_SIGN_RE = re.compile(r"""
^ # start of line
[ \t]* # space(s)
([*][/])? # a possible ..*/ ending block comment
[ \t]* # space(s)
([/][*] [^\n]* [*][/])* # initial block comment(s) /*...*/
[ \t]* # space(s)
(?P<directive> # group('directive') -- what we're after
[#] # the pound sign
[ \t]* # space(s)
(define|include_next|include|import)\b # the directive
((?!\\\n).)* # the rest on this line: zero or more
# characters, each not a backslash that
# is followed by \n
(\\\n((?!\\\n).)*)* # (backslash + \n + rest of line)*
)
""", re.VERBOSE + re.MULTILINE)
NOT_COMMA_OR_PARENS = "([^(),])"
# For parsing macro expressions of the form:
# symbol
# symbol (something, ..., something), where something is not ',', '(', or ')'
MACRO_EXPR = r"""
(?P<symbol>\w+) # the symbol, named 'symbol'
( \s*
[(] \s* # beginning parenthesis
(?P<args> # a parenthesized expression (with no
# containing expressions -- a limitation)
# named 'args'
%(NOT_COMMA_OR_PARENS)s* # the first argument (if it exists)
([,]%(NOT_COMMA_OR_PARENS)s*)* # subsequent arguments
)
[)] # ending parenthesis
)?""" % {'NOT_COMMA_OR_PARENS': NOT_COMMA_OR_PARENS}
MACRO_EXPR_RE = re.compile(MACRO_EXPR, re.VERBOSE)
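# Illustrative sketch (not in the original source) of how MACRO_EXPR_RE splits
# a macro's left-hand side into its name and argument list:
#
#   m = MACRO_EXPR_RE.match("MAX(a,b)")
#   m.group('symbol')   # -> 'MAX'
#   m.group('args')     # -> 'a,b'
#
#   m = MACRO_EXPR_RE.match("PI")
#   m.group('symbol')   # -> 'PI'
#   m.group('args')     # -> None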
# Nice little parser of certain directive lines (after backslash-ended
# line continuations and comments are removed)
DIRECTIVE_RE = re.compile(r"""
^[ \t]*
[#]
[ \t]*
(
((?P<include> include_next | include | import)
\s*
( "(?P<quote> (\w|[_/.,+-])*)" | # "bar/foo.h"
<(?P<angle> (\w|[_/.,+-])*)> | # <stdio.h>
(?P<expr> .*?)) # expr, match . minimally
)
|
(?P<define> define \s+ (?P<lhs> %s) # insert MACRO_EXPR here
\s* (?P<rhs> .*?)) # match . minimally before
# trailing white space
)
\s* # trailing whitespace
((/[*]|//).*)? # optional trailing comment start
$
""" % MACRO_EXPR,
re.VERBOSE)
#
INCLUDE_STRING_RE = re.compile(r"""
^
\s*
( "\s*(?P<quote> (\w|[\\_/.,+-])*)\s*" |
<\s*(?P<angle> (\w|[\\_/.,+-])*)\s*>
)
\s*
$
""", re.VERBOSE)
# For ridding lines of backslash
BACKSLASH_RE = re.compile(r"\\\n", re.MULTILINE)
# For matching non-comment prefix of line.
COMMENT_RE = re.compile(r"((?!/[*]|//).)*")
# FOR SEARCHING AFTER /* .. */.
PAIRED_COMMENT_RE = re.compile(r"(/[*].*?[*]/)")
def InsertMacroDefInTable(lhs, rhs, symbol_table, callback_function):
"""Insert the definition of a pair (lhs, rhs) into symbol table.
Arguments:
lhs: a string, of the form "symbol" or "symbol(param1, ..., paramN)"
rhs: a string
symbol_table: where the definition will be inserted
callback_function: a function called with value "symbol"
"""
m_expr = MACRO_EXPR_RE.match(lhs)
if m_expr.end(0) != len(lhs):
raise NotCoveredError(
"Unexpected macro definition with LHS: '%s'." % lhs)
# Calculate the definition df, either
# - a pair ([arg_1, .., arg_n], rhs) where arg_i is the
# i'th formal parameter (function-like macro definition), or
# - just a symbol (object-like macro definition)
if m_expr.group('args') != None: # perhaps ''
# A function-like macro definition.
# Construct pair (list of formal parameters, rhs).
args = m_expr.group('args').split(',')
df = args, rhs
# lhs is adjusted to be just the 'function' name
lhs = m_expr.group('symbol')
else: # m_expr.group('args')
# An object-like macro definition
assert m_expr.group('symbol') == lhs
df = rhs
if lhs not in symbol_table:
symbol_table[lhs] = [df]
else:
symbol_table[lhs].append(df)
callback_function(lhs)
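# Illustrative sketch (not part of the original source) of the symbol-table
# entries the helper above produces for the two macro forms:
#
#   table = {}
#   InsertMacroDefInTable("PI", "3.14", table, lambda sym: None)
#   InsertMacroDefInTable("MAX(a,b)", "((a)>(b)?(a):(b))", table,
#                         lambda sym: None)
#   # table == {'PI': ['3.14'],
#   #           'MAX': [(['a', 'b'], '((a)>(b)?(a):(b))')]}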
class ParseFile(object):
"""Parser class for syntax understood by CPP, the C and C++
preprocessor. An instance of this class defines the Parse method."""
def __init__(self, includepath_map):
"""Constructor. Make a parser.
Arguments:
includepath_map: string-to-index map for includepaths
"""
assert isinstance(includepath_map, cache_basics.MapToIndex)
self.includepath_map = includepath_map
self.define_callback = lambda x: None
def SetDefineCallback(self, callback_function):
"""Set a callback function, which is invoked for '#define's.
The function is called as callback_function(symbol), whenever a '#define'
of symbol is parsed. The callback allows an include processor to adjust
its notion of which expressions are still current. If we (the include
processor) already met
#define A B
and later meet
#define B
whether this is the first definition of B or not, then the possible
meanings of A have changed. We set up a callback to identify such
situations."""
self.define_callback = callback_function
def _ParseFine(self, poundsign_match, includepath_map_index, file_contents,
symbol_table, quote_includes, angle_includes, expr_includes,
next_includes):
"""Helper function for ParseFile."""
Debug(DEBUG_TRACE2, "_ParseFine %s",
file_contents[poundsign_match.start('directive'):
poundsign_match.end('directive')])
m = DIRECTIVE_RE.match( # parse the directive
PAIRED_COMMENT_RE.sub( # remove possible paired comments
"",
BACKSLASH_RE.sub( # get rid of lines ending in backslash
"",
file_contents[poundsign_match.start('directive'):
poundsign_match.end('directive')])))
if m:
try:
groupdict = m.groupdict()
if groupdict['include'] == 'include' or \
groupdict['include'] == 'import':
if groupdict['quote']:
quote_includes.append(includepath_map_index(m.group('quote')))
elif groupdict['angle']:
angle_includes.append(includepath_map_index(m.group('angle')))
elif groupdict['expr']:
expr_includes.append(m.group('expr').rstrip())
else:
assert False
elif groupdict['include'] == 'include_next':
# We do not, in fact, distinguish between the two kinds of
# include_next's, because we conservatively assume that they are of
# the quote variety.
if groupdict['quote']:
next_includes.append(includepath_map_index(m.group('quote')))
elif groupdict['angle']:
next_includes.append(includepath_map_index(m.group('angle')))
# The following restriction would not be too hard to remove.
elif groupdict['expr']:
NotCoveredError(
"For include_next: cannot deal with computed include here.")
else:
assert False
raise NotCoveredError("include_next not parsed")
elif groupdict['define']:
if not groupdict['lhs']:
raise NotCoveredError("Unexpected macro definition with no LHS.")
else:
lhs = m.group('lhs')
rhs = groupdict['rhs'] and groupdict['rhs'] or None
InsertMacroDefInTable(lhs, rhs, symbol_table, self.define_callback)
except NotCoveredError, inst:
# Decorate this exception with the filename, by recreating it
# appropriately.
if not inst.source_file:
raise NotCoveredError(inst.args[0],
self.filepath,
send_email = inst.send_email)
else:
raise
def Parse(self, filepath, symbol_table):
"""Parse filepath for preprocessor directives and update symbol table.
Arguments:
filepath: a string
symbol_table: a dictionary, see module macro_expr
Returns:
(quote_includes, angle_includes, expr_includes, next_includes), where
all are lists of filepath indices, except for expr_includes, which is a
list of expressions.
"""
Debug(DEBUG_TRACE, "ParseFile %s", filepath)
assert isinstance(filepath, str)
self.filepath = filepath
parse_file_start_time = time.clock()
statistics.parse_file_counter += 1
includepath_map_index = self.includepath_map.Index
try:
fd = open(filepath, "r")
except IOError, msg:
# This normally does not happen because the file should be known to
# exists. Still there might be, say, a permissions issue that prevents it
# from being read.
raise NotCoveredError("Parse file: '%s': %s" % (filepath, msg),
send_email=False)
file_contents = fd.read()
fd.close()
quote_includes, angle_includes, expr_includes, next_includes = (
[], [], [], [])
i = 0
line_start_last = None
while True:
# Scan coarsely to find something of interest
mfast = RE_INCLUDE_DEFINE.search(file_contents, i + 1)
if not mfast: break
i = mfast.end()
# Identify the line of interest by scanning backwards to \n
line_start = file_contents.rfind("\n", 0, i) + 1 # to beginning of line
      # Note: if no "\n" was found, rfind returned -1, so line_start is 0,
      # i.e. the beginning of the file.
      ### TODO(klarlund) continue going back if line continuation preceding
# Is this really a new line?
if line_start == line_start_last: continue
line_start_last = line_start
# Here we should really skip back over lines to see whether a totally
# pathological situation involving '\'-terminated lines like:
#
# #include <stdio.h>
# # Start of pathological situation involving line continuations:
# # \
# \
# \
# \
# include "nidgaard.h"
#
# occurs, where the first # on each line is just Python syntax and should
# not be considered as part of the C/C++ example. This code defines a
# valid directive to include "nidgaard.h". We will not handle such
# situations correctly -- the include will be missed.
# Parse the line of interest according to fine-grained parser
poundsign_match = POUND_SIGN_RE.match(file_contents, line_start)
if not poundsign_match:
continue
self._ParseFine(poundsign_match, includepath_map_index, file_contents,
symbol_table, quote_includes, angle_includes,
expr_includes, next_includes)
statistics.parse_file_total_time += time.clock() - parse_file_start_time
return (quote_includes, angle_includes, expr_includes, next_includes)
| gpl-2.0 |
eshook/Forest | test_forest.py | 1 | 1438 | """
Copyright (c) 2017 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, [email protected])
@contributors: <Contribute and add your name here!>
"""
# Begin coverage testing
import coverage
cov = coverage.coverage(omit = "/usr/*")
cov.start()
from forest import *
import unittest
# Import Forest unit tests
from unittests import *
# Use this to test a single suite (e.g., test_Bob)
#unittest.TextTestRunner(verbosity=2).run(test_Bob_suite)
# Test the full suite (each test file in the unittests directory stores a unittest suite)
# The following line collapses them into a 'full suite' of tests
# Note: This does not test CUDA
full_suite = unittest.TestSuite([test_Bob_suite,test_Bobs_suite,test_IO_suite,test_PrimitivesRaster_suite])
# Replace with this line to test for Bmsb with CUDA
#full_suite = unittest.TestSuite([test_Bmsb_suite,test_Bob_suite,test_Bobs_suite,test_IO_suite,test_PrimitivesRaster_suite])
# Run the full suit using the 'unittest' package
run_return = unittest.TextTestRunner(verbosity=2).run(full_suite)
# If unit tests are successful, then run a coverage test
if run_return.wasSuccessful():
print("Unit tests successful, running coverage test")
# Stop coverage testing and print out a report on percent of code covered by testing
cov.stop()
cov.report(show_missing = True)
| bsd-3-clause |
RongxinZhang/bitcoinxt | qa/rpc-tests/script_test.py | 94 | 9499 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
'''
Test notes:
This test uses the script_valid and script_invalid tests from the unittest
framework to do end-to-end testing where we compare that two nodes agree on
whether blocks containing a given test script are valid.
We generally ignore the script flags associated with each test (since we lack
the precision to test each script using those flags in this framework), but
for tests with SCRIPT_VERIFY_P2SH, we can use a block time after the BIP16
switchover date to try to test with that flag enabled (and for tests without
that flag, we use a block time before the switchover date).
NOTE: This test is very slow and may take more than 40 minutes to run.
'''
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestInstance, TestManager
from test_framework.mininode import *
from test_framework.blocktools import *
from test_framework.script import *
import logging
import copy
import json
script_valid_file = "../../src/test/data/script_valid.json"
script_invalid_file = "../../src/test/data/script_invalid.json"
# Pass in a set of json files to open.
class ScriptTestFile(object):
def __init__(self, files):
self.files = files
self.index = -1
self.data = []
def load_files(self):
for f in self.files:
self.data.extend(json.loads(open(os.path.dirname(os.path.abspath(__file__))+"/"+f).read()))
# Skip over records that are not long enough to be tests
def get_records(self):
while (self.index < len(self.data)):
if len(self.data[self.index]) >= 3:
yield self.data[self.index]
self.index += 1
# Helper for parsing the flags specified in the .json files
SCRIPT_VERIFY_NONE = 0
SCRIPT_VERIFY_P2SH = 1
SCRIPT_VERIFY_STRICTENC = 1 << 1
SCRIPT_VERIFY_DERSIG = 1 << 2
SCRIPT_VERIFY_LOW_S = 1 << 3
SCRIPT_VERIFY_NULLDUMMY = 1 << 4
SCRIPT_VERIFY_SIGPUSHONLY = 1 << 5
SCRIPT_VERIFY_MINIMALDATA = 1 << 6
SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS = 1 << 7
SCRIPT_VERIFY_CLEANSTACK = 1 << 8
flag_map = {
"": SCRIPT_VERIFY_NONE,
"NONE": SCRIPT_VERIFY_NONE,
"P2SH": SCRIPT_VERIFY_P2SH,
"STRICTENC": SCRIPT_VERIFY_STRICTENC,
"DERSIG": SCRIPT_VERIFY_DERSIG,
"LOW_S": SCRIPT_VERIFY_LOW_S,
"NULLDUMMY": SCRIPT_VERIFY_NULLDUMMY,
"SIGPUSHONLY": SCRIPT_VERIFY_SIGPUSHONLY,
"MINIMALDATA": SCRIPT_VERIFY_MINIMALDATA,
"DISCOURAGE_UPGRADABLE_NOPS": SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS,
"CLEANSTACK": SCRIPT_VERIFY_CLEANSTACK,
}
def ParseScriptFlags(flag_string):
flags = 0
for x in flag_string.split(","):
if x in flag_map:
flags |= flag_map[x]
else:
print "Error: unrecognized script flag: ", x
return flags
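# Illustrative example (not part of the original test): flags combine by
# bitwise OR, so
#   ParseScriptFlags("P2SH,STRICTENC")
# returns SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC == 3.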
'''
Given a string that is a scriptsig or scriptpubkey from the .json files above,
convert it to a CScript()
'''
# Replicates behavior from core_read.cpp
def ParseScript(json_script):
script = json_script.split(" ")
parsed_script = CScript()
for x in script:
if len(x) == 0:
# Empty string, ignore.
pass
elif x.isdigit() or (len(x) >= 1 and x[0] == "-" and x[1:].isdigit()):
# Number
n = int(x, 0)
if (n == -1) or (n >= 1 and n <= 16):
parsed_script = CScript(bytes(parsed_script) + bytes(CScript([n])))
else:
parsed_script += CScriptNum(int(x, 0))
elif x.startswith("0x"):
# Raw hex data, inserted NOT pushed onto stack:
for i in xrange(2, len(x), 2):
parsed_script = CScript(bytes(parsed_script) + bytes(chr(int(x[i:i+2],16))))
elif x.startswith("'") and x.endswith("'") and len(x) >= 2:
# Single-quoted string, pushed as data.
parsed_script += CScript([x[1:-1]])
else:
# opcode, e.g. OP_ADD or ADD:
tryopname = "OP_" + x
if tryopname in OPCODES_BY_NAME:
parsed_script += CScriptOp(OPCODES_BY_NAME["OP_" + x])
else:
print "ParseScript: error parsing '%s'" % x
return ""
return parsed_script
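# Illustrative examples (not part of the original test) of inputs ParseScript
# handles, following the rules above:
#
#   ParseScript("1 2 ADD")   # -> CScript with OP_1 OP_2 OP_ADD
#   ParseScript("0x51")      # -> raw byte 0x51 inserted (not pushed)
#   ParseScript("'abc'")     # -> the literal string 'abc' pushed as data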
class TestBuilder(object):
def create_credit_tx(self, scriptPubKey):
# self.tx1 is a coinbase transaction, modeled after the one created by script_tests.cpp
# This allows us to reuse signatures created in the unit test framework.
self.tx1 = create_coinbase() # this has a bip34 scriptsig,
self.tx1.vin[0].scriptSig = CScript([0, 0]) # but this matches the unit tests
self.tx1.vout[0].nValue = 0
self.tx1.vout[0].scriptPubKey = scriptPubKey
self.tx1.rehash()
def create_spend_tx(self, scriptSig):
self.tx2 = create_transaction(self.tx1, 0, CScript(), 0)
self.tx2.vin[0].scriptSig = scriptSig
self.tx2.vout[0].scriptPubKey = CScript()
self.tx2.rehash()
def rehash(self):
self.tx1.rehash()
self.tx2.rehash()
# This test uses the (default) two nodes provided by ComparisonTestFramework,
# specified on the command line with --testbinary and --refbinary.
# See comptool.py
class ScriptTest(ComparisonTestFramework):
def run_test(self):
# Set up the comparison tool TestManager
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
# Load scripts
self.scripts = ScriptTestFile([script_valid_file, script_invalid_file])
self.scripts.load_files()
# Some variables we re-use between test instances (to build blocks)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
test.run()
def generate_test_instance(self, pubkeystring, scriptsigstring):
scriptpubkey = ParseScript(pubkeystring)
scriptsig = ParseScript(scriptsigstring)
test = TestInstance(sync_every_block=False)
test_build = TestBuilder()
test_build.create_credit_tx(scriptpubkey)
test_build.create_spend_tx(scriptsig)
test_build.rehash()
block = create_block(self.tip, test_build.tx1, self.block_time)
self.block_time += 1
block.solve()
self.tip = block.sha256
test.blocks_and_transactions = [[block, True]]
for i in xrange(100):
block = create_block(self.tip, create_coinbase(), self.block_time)
self.block_time += 1
block.solve()
self.tip = block.sha256
test.blocks_and_transactions.append([block, True])
block = create_block(self.tip, create_coinbase(), self.block_time)
self.block_time += 1
block.vtx.append(test_build.tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
test.blocks_and_transactions.append([block, None])
return test
# This generates the tests for TestManager.
def get_tests(self):
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.block_time = 1333230000 # before the BIP16 switchover
'''
Create a new block with an anyone-can-spend coinbase
'''
block = create_block(self.tip, create_coinbase(), self.block_time)
self.block_time += 1
block.solve()
self.tip = block.sha256
yield TestInstance(objects=[[block, True]])
'''
Build out to 100 blocks total, maturing the coinbase.
'''
test = TestInstance(objects=[], sync_every_block=False, sync_every_tx=False)
for i in xrange(100):
b = create_block(self.tip, create_coinbase(), self.block_time)
b.solve()
test.blocks_and_transactions.append([b, True])
self.tip = b.sha256
self.block_time += 1
yield test
''' Iterate through script tests. '''
counter = 0
for script_test in self.scripts.get_records():
''' Reset the blockchain to genesis block + 100 blocks. '''
if self.nodes[0].getblockcount() > 101:
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(102))
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(102))
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
[scriptsig, scriptpubkey, flags] = script_test[0:3]
flags = ParseScriptFlags(flags)
# We can use block time to determine whether the nodes should be
# enforcing BIP16.
#
# We intentionally let the block time grow by 1 each time.
# This forces the block hashes to differ between tests, so that
# a call to invalidateblock doesn't interfere with a later test.
if (flags & SCRIPT_VERIFY_P2SH):
self.block_time = 1333238400 + counter # Advance to enforcing BIP16
else:
self.block_time = 1333230000 + counter # Before the BIP16 switchover
print "Script test: [%s]" % script_test
yield self.generate_test_instance(scriptpubkey, scriptsig)
counter += 1
if __name__ == '__main__':
ScriptTest().main()
| mit |
bridadan/pyOCD | pyOCD/pyDAPAccess/interface/__init__.py | 3 | 1881 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
from hidapi_backend import HidApiUSB
from pyusb_backend import PyUSB
from pywinusb_backend import PyWinUSB
INTERFACE = {
'hidapiusb': HidApiUSB,
'pyusb': PyUSB,
'pywinusb': PyWinUSB
}
# Allow user to override backend with an environment variable.
usb_backend = os.getenv('PYOCD_USB_BACKEND', "")
# Check validity of backend env var.
if usb_backend and ((usb_backend not in INTERFACE.keys()) or (not INTERFACE[usb_backend].isAvailable)):
logging.error("Invalid USB backend specified in PYOCD_USB_BACKEND: " + usb_backend)
usb_backend = ""
# Select backend based on OS and availability.
if not usb_backend:
if os.name == "nt":
# Prefer hidapi over pyWinUSB for Windows, since pyWinUSB has known bug(s)
if HidApiUSB.isAvailable:
usb_backend = "hidapiusb"
elif PyWinUSB.isAvailable:
usb_backend = "pywinusb"
else:
raise Exception("No USB backend found")
elif os.name == "posix":
# Select hidapi for OS X and pyUSB for Linux.
if os.uname()[0] == 'Darwin':
usb_backend = "hidapiusb"
else:
usb_backend = "pyusb"
else:
raise Exception("No USB backend found")
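# Illustrative note (not part of the original module): the auto-detection above
# can be bypassed through the environment variable read earlier, e.g.
#   PYOCD_USB_BACKEND=pyusb python some_script.py   # script name is hypothetical
# Any key of the INTERFACE dict ('hidapiusb', 'pyusb', 'pywinusb') is accepted;
# invalid values are logged and fall back to the OS-based selection.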
| apache-2.0 |
vortex-exoplanet/VIP | vip_hci/negfc/utils_negfc.py | 2 | 8821 | #! /usr/bin/env python
"""
Module with post-processing related functions called from within the NEGFC
algorithm.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['cube_planet_free']
import numpy as np
from ..metrics import cube_inject_companions
import math
from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show
def cube_planet_free(planet_parameter, cube, angs, psfn, plsc, imlib='opencv',
interpolation='lanczos4',transmission=None):
"""
Return a cube in which we have injected negative fake companion at the
position/flux given by planet_parameter.
Parameters
----------
planet_parameter: numpy.array or list
The (r, theta, flux) for all known companions. For a 4d cube r,
theta and flux must all be 1d arrays with length equal to cube.shape[0];
i.e. planet_parameter should have shape: (n_pl,3,n_ch).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
        The parallactic angle vector, expressed as a numpy.array.
psfn: numpy.array
The scaled psf expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
Returns
-------
cpf : numpy.array
The cube with negative companions injected at the position given in
planet_parameter.
"""
cpf = np.zeros_like(cube)
planet_parameter = np.array(planet_parameter)
if cube.ndim == 4:
        if planet_parameter.shape[2] != cube.shape[0]:
raise TypeError("Input planet parameter with wrong dimensions.")
for i in range(planet_parameter.shape[0]):
if i == 0:
cube_temp = cube
else:
cube_temp = cpf
if cube.ndim == 4:
            for j in range(cube.shape[0]):
cpf[j] = cube_inject_companions(cube_temp[j], psfn[j], angs,
flevel=-planet_parameter[i, 2, j],
plsc=plsc,
rad_dists=[planet_parameter[i, 0, j]],
n_branches=1,
theta=planet_parameter[i, 1, j],
imlib=imlib,
interpolation=interpolation,
verbose=False,
transmission=transmission)
else:
cpf = cube_inject_companions(cube_temp, psfn, angs,
flevel=-planet_parameter[i, 2], plsc=plsc,
rad_dists=[planet_parameter[i, 0]],
n_branches=1, theta=planet_parameter[i, 1],
imlib=imlib, interpolation=interpolation,
verbose=False, transmission=transmission)
return cpf
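# Illustrative usage sketch (not part of the original module): ``cube``, ``angs``
# and ``psfn`` are assumed to be already-loaded arrays (ADI cube, parallactic
# angles, normalised PSF) and the numbers below are hypothetical.
# >>> params = [[30.0, 120.0, 400.0]]   # one companion: r [px], theta [deg], flux
# >>> cube_empty = cube_planet_free(params, cube, angs, psfn, plsc=0.0272)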
def radial_to_eq(r=1, t=0, rError=0, tError=0, display=False):
"""
Convert the position given in (r,t) into \delta RA and \delta DEC, as
well as the corresponding uncertainties.
t = 0 deg (resp. 90 deg) points toward North (resp. East).
Parameters
----------
r: float
The radial coordinate.
t: float
The angular coordinate.
rError: float
The error bar related to r.
tError: float
The error bar related to t.
display: boolean, optional
If True, a figure illustrating the error ellipse is displayed.
Returns
-------
out : tuple
((RA, RA error), (DEC, DEC error))
"""
ra = (r * np.sin(math.radians(t)))
dec = (r * np.cos(math.radians(t)))
u, v = (ra, dec)
nu = np.mod(np.pi/2-math.radians(t), 2*np.pi)
a, b = (rError,r*np.sin(math.radians(tError)))
beta = np.linspace(0, 2*np.pi, 5000)
x, y = (u + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu)),
v + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu)))
raErrorInf = u - np.amin(x)
raErrorSup = np.amax(x) - u
decErrorInf = v - np.amin(y)
decErrorSup = np.amax(y) - v
if display:
plot(u,v,'ks',x,y,'r')
plot((r+rError) * np.cos(nu), (r+rError) * np.sin(nu),'ob',
(r-rError) * np.cos(nu), (r-rError) * np.sin(nu),'ob')
plot(r * np.cos(nu+math.radians(tError)),
r*np.sin(nu+math.radians(tError)),'ok')
plot(r * np.cos(nu-math.radians(tError)),
r*np.sin(nu-math.radians(tError)),'ok')
plot(0,0,'og',np.cos(np.linspace(0,2*np.pi,10000)) * r,
np.sin(np.linspace(0,2*np.pi,10000)) * r,'y')
plot([0,r*np.cos(nu+math.radians(tError*0))],
[0,r*np.sin(nu+math.radians(tError*0))],'k')
axes().set_aspect('equal')
lim = np.amax([a,b]) * 2.
xlim([ra-lim,ra+lim])
ylim([dec-lim,dec+lim])
gca().invert_xaxis()
show()
return ((ra,np.mean([raErrorInf,raErrorSup])),
(dec,np.mean([decErrorInf,decErrorSup])))
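# Quick illustrative check (values chosen for simplicity): a separation of 1 unit
# due North (t=0) maps to a pure DEC offset, with zero error bars.
# >>> radial_to_eq(r=1, t=0)
# ((0.0, 0.0), (1.0, 0.0))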
def cart_to_polar(y, x, ceny=0, cenx=0):
"""
Convert cartesian into polar coordinates (r,theta) with
respect to a given center (cenx,ceny).
Parameters
----------
x,y: float
The cartesian coordinates.
Returns
-------
out : tuple
The polar coordinates (r,theta) with respect to the (cenx,ceny).
Note that theta is given in degrees.
"""
r = np.sqrt((y-ceny)**2 + (x-cenx)**2)
theta = np.degrees(np.arctan2(y-ceny, x-cenx))
return r, np.mod(theta,360)
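# Illustrative example (hypothetical values): a 3-4-5 triangle with respect to
# the default (0, 0) center. Note the (y, x) argument order.
# >>> cart_to_polar(3, 4)        # y=3, x=4
# (5.0, 36.86989...)             # theta in degrees, approximately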
def polar_to_cart(r, theta, ceny=0, cenx=0):
"""
Convert polar coordinates with respect to the center (cenx,ceny) into
cartesian coordinates (x,y) with respect to the bottom left corner of the
    image.
Parameters
----------
r,theta: float
The polar coordinates.
Returns
-------
out : tuple
The cartesian coordinates (x,y) with respect to the bottom left corner
        of the image.
"""
x = r*np.cos(np.deg2rad(theta)) + cenx
y = r*np.sin(np.deg2rad(theta)) + ceny
return x,y
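# Illustrative round trip with cart_to_polar above (hypothetical values):
# >>> r, theta = cart_to_polar(3, 4)
# >>> polar_to_cart(r, theta)    # ~ (4.0, 3.0), i.e. (x, y)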
def ds9index_to_polar(y, x, ceny=0, cenx=0):
"""
    Convert a pixel index read on an image displayed with DS9 into polar coordinates
(r,theta) with respect to a given center (cenx,ceny).
Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an
image M is displayed with DS9, the coordinates of the center of the pixel
associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the
    center of the coordinate system. As a consequence, when you read
    (x_ds9, y_ds9) on an image displayed with DS9, the corresponding position is
(y-0.5, x-0.5) and the associated pixel value is
M(np.floor(y)-1,np.floor(x)-1).
Parameters
----------
x,y: float
The pixel index in DS9
Returns
-------
out : tuple
The polar coordinates (r,theta) with respect to the (cenx,ceny).
Note that theta is given in degrees.
"""
r = np.sqrt((y-0.5-ceny)**2 + (x-0.5-cenx)**2)
theta = np.degrees(np.arctan2(y-0.5-ceny, x-0.5-cenx))
return r, np.mod(theta,360)
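# Illustrative example: the center of the first DS9 pixel, (x_ds9, y_ds9) = (1, 1),
# lies at (0.5, 0.5) in array coordinates, hence r ~ 0.707 and theta = 45 deg with
# respect to a (0, 0) center.
# >>> ds9index_to_polar(1, 1)
# (0.7071..., 45.0)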
def polar_to_ds9index(r, theta, ceny=0, cenx=0):
"""
Convert position (r,theta) in an image with respect to a given center
(cenx,ceny) into position in the image displayed with DS9.
Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an
image M is displayed with DS9, the coordinates of the center of the pixel
associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the
    center of the coordinate system. As a consequence, when you read
    (x_ds9, y_ds9) on an image displayed with DS9, the corresponding position is
(y-0.5, x-0.5) and the associated pixel value is
M(np.floor(y)-1,np.floor(x)-1).
Parameters
----------
    r,theta: float
        The polar coordinates with respect to the center (cenx,ceny).
Returns
-------
out : tuple
        The pixel index (x_ds9, y_ds9) to use in the image displayed with DS9.
"""
x_ds9 = r*np.cos(np.deg2rad(theta)) + 0.5 + cenx
y_ds9 = r*np.sin(np.deg2rad(theta)) + 0.5 + ceny
return x_ds9, y_ds9 | mit |
apyrgio/synnefo | snf-astakos-app/astakos/im/views/util.py | 7 | 12280 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import astakos.im.messages as astakos_messages
from astakos.im import settings
from django.contrib import messages
from django.contrib.auth.views import redirect_to_login
from django.core.xheaders import populate_xheaders
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext, loader as template_loader
from django.utils.translation import ugettext as _
from django.views.generic.create_update import apply_extra_context, \
get_model_and_form_class, lookup_object
from astakos.im import transaction
from synnefo.lib.ordereddict import OrderedDict
from astakos.im import presentation
from astakos.im.util import model_to_dict
from astakos.im import tables
from astakos.im.models import Resource, ProjectApplication, ProjectMembership
from astakos.im import functions
from astakos.im.util import get_context, restrict_next, restrict_reverse
logger = logging.getLogger(__name__)
class ExceptionHandler(object):
def __init__(self, request):
self.request = request
def __enter__(self):
pass
def __exit__(self, exc_type, value, traceback):
if value is not None: # exception
logger.exception(value)
m = _(astakos_messages.GENERIC_ERROR)
messages.error(self.request, m)
return True # suppress exception
def render_response(template, tab=None, status=200, context_instance=None,
**kwargs):
"""
Calls ``django.template.loader.render_to_string`` with an additional
    ``tab`` keyword argument and returns a ``django.http.HttpResponse``
with the specified ``status``.
"""
if tab is None:
tab = template.partition('_')[0].partition('.html')[0]
kwargs.setdefault('tab', tab)
html = template_loader.render_to_string(
template, kwargs, context_instance=context_instance)
response = HttpResponse(html, status=status)
return response
def _create_object(request, model=None, template_name=None,
template_loader=template_loader, extra_context=None,
post_save_redirect=None, login_required=False,
context_processors=None, form_class=None, msg=None,
summary_template_name=None):
"""
    Based on django.views.generic.create_update.create_object, which displays a
summary page before creating the object.
"""
if extra_context is None:
extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
extra_context['edit'] = 0
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
verify = request.GET.get('verify')
edit = request.GET.get('edit')
if verify == '1':
extra_context['show_form'] = False
extra_context['form_data'] = form.cleaned_data
template_name = summary_template_name
elif edit == '1':
extra_context['show_form'] = True
else:
new_object = form.save()
if not msg:
msg = _(
"The %(verbose_name)s was created successfully.")
msg = msg % model._meta.__dict__
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, new_object)
else:
form = form_class()
# Create the template, context, response
if not template_name:
template_name = "%s/%s_form.html" % \
(model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form
}, context_processors)
apply_extra_context(extra_context, c)
return HttpResponse(t.render(c))
def _update_object(request, model=None, object_id=None, slug=None,
slug_field='slug', template_name=None,
template_loader=template_loader, extra_context=None,
post_save_redirect=None, login_required=False,
context_processors=None, template_object_name='object',
form_class=None, msg=None, summary_template_name=None):
"""
    Based on django.views.generic.create_update.update_object, which displays a
summary page before updating the object.
"""
if extra_context is None:
extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
verify = request.GET.get('verify')
edit = request.GET.get('edit')
if verify == '1':
extra_context['show_form'] = False
extra_context['form_data'] = form.cleaned_data
template_name = summary_template_name
elif edit == '1':
extra_context['show_form'] = True
else:
obj = form.save()
if not msg:
                    msg = _("The %(verbose_name)s was updated successfully.")
msg = msg % model._meta.__dict__
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, obj)
else:
form = form_class(instance=obj)
if not template_name:
template_name = "%s/%s_form.html" % \
(model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model,
getattr(obj, obj._meta.pk.attname))
return response
def sorted_resources(resource_grant_or_quota_set):
meta = presentation.RESOURCES
order = meta.get('resources_order', [])
resources = list(resource_grant_or_quota_set)
def order_key(item):
name = item.resource.name
if name in order:
return order.index(name)
return -1
return sorted(resources, key=order_key)
def _resources_catalog(as_dict=False):
"""
    `resource_catalog` contains a list of tuples. Each tuple contains the group
    key the resource is assigned to and a list of dicts holding the
    resources' information.
`resource_groups` contains information about the groups
"""
# presentation data
resources_meta = presentation.RESOURCES
resource_groups = resources_meta.get('groups', {})
resource_catalog = ()
resource_keys = []
# resources in database
resource_details = map(lambda obj: model_to_dict(obj, exclude=[]),
Resource.objects.all())
# initialize resource_catalog to contain all group/resource information
for r in resource_details:
if not r.get('group') in resource_groups:
resource_groups[r.get('group')] = {'icon': 'unknown'}
resource_keys = [r.get('str_repr') for r in resource_details]
resource_catalog = [[g, filter(lambda r: r.get('group', '') == g,
resource_details)] for g in resource_groups]
# order groups, also include unknown groups
groups_order = resources_meta.get('groups_order')
for g in resource_groups.keys():
if not g in groups_order:
groups_order.append(g)
# order resources, also include unknown resources
resources_order = resources_meta.get('resources_order')
for r in resource_keys:
if not r in resources_order:
resources_order.append(r)
# sort catalog groups
resource_catalog = sorted(resource_catalog,
key=lambda g: groups_order.index(g[0]))
# sort groups
def groupindex(g):
return groups_order.index(g[0])
resource_groups_list = sorted([(k, v) for k, v in resource_groups.items()],
key=groupindex)
resource_groups = OrderedDict(resource_groups_list)
# sort resources
def resourceindex(r):
return resources_order.index(r['str_repr'])
for index, group in enumerate(resource_catalog):
resource_catalog[index][1] = sorted(resource_catalog[index][1],
key=resourceindex)
if len(resource_catalog[index][1]) == 0:
resource_catalog.pop(index)
for gindex, g in enumerate(resource_groups):
if g[0] == group[0]:
resource_groups.pop(gindex)
# filter out resources which user cannot request in a project application
for group, resources in list(resource_catalog):
for resource in resources:
if not resource.get('ui_visible'):
resources.remove(resource)
# cleanup empty groups
resource_catalog_new = []
for group, resources in list(resource_catalog):
if len(resources) == 0:
resource_groups.pop(group)
else:
resource_catalog_new.append((group, resources))
if as_dict:
resource_catalog_new = OrderedDict(resource_catalog_new)
for name, resources in resource_catalog_new.iteritems():
_rs = OrderedDict()
for resource in resources:
_rs[resource.get('name')] = resource
resource_catalog_new[name] = _rs
resource_groups = OrderedDict(resource_groups)
return resource_catalog_new, resource_groups
def get_user_projects_table(projects, user, prefix, request=None):
apps = ProjectApplication.objects.pending_per_project(projects)
memberships = user.projectmembership_set.one_per_project()
objs = ProjectMembership.objects
accepted_ms = objs.any_accepted_per_project(projects)
requested_ms = objs.requested_per_project(projects)
return tables.UserProjectsTable(projects, user=user,
prefix=prefix,
pending_apps=apps,
memberships=memberships,
accepted=accepted_ms,
requested=requested_ms,
request=request)
@transaction.commit_on_success
def handle_valid_members_form(request, project_id, addmembers_form):
if addmembers_form.is_valid():
try:
users = addmembers_form.valid_users
for user in users:
functions.enroll_member_by_email(project_id, user.email,
request_user=request.user)
except functions.ProjectError as e:
messages.error(request, e)
def redirect_to_next(request, default_resolve, *args, **kwargs):
next = kwargs.pop('next', None)
if not next:
default = restrict_reverse(default_resolve, *args,
restrict_domain=settings.COOKIE_DOMAIN,
**kwargs)
next = request.GET.get('next', default)
next = restrict_next(next, domain=settings.COOKIE_DOMAIN)
return redirect(next)
| gpl-3.0 |
bguillot/OpenUpgrade | addons/gamification_sale_crm/__openerp__.py | 67 | 1369 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'CRM Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'depends': ['gamification','sale_crm'],
'description': """Example of goal definitions and challenges that can be used related to the usage of the CRM Sale module.""",
'data': ['sale_crm_goals.xml'],
'demo': ['sale_crm_goals_demo.xml'],
'auto_install': True,
}
| agpl-3.0 |
CiscoSystems/jujucharm-n1k | charms/precise/juju-gui/server/guiserver/auth.py | 1 | 15759 | # This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Juju GUI server authentication management.
This module includes the pieces required to process user authentication.
- User: this is a simple data structure representing a logged in or
anonymous user.
- Authentication backends (GoBackend and PythonBackend): the primary
purpose of auth backends is to provide the logic to parse requests' data
based on the API implementation currently in use. They can also be used
to create authentication requests. They must implement the following
interface:
- get_request_id(data) -> id or None;
- request_is_login(data) -> bool;
- get_credentials(data) -> (str, str);
- login_succeeded(data) -> bool; and
- make_request(request_id, username, password) -> dict.
Backends don't know anything about the authentication process or the
current user, and are not intended to store state: one backend (the one
suitable for the current API implementation) is instantiated once when
the application is bootstrapped and used as a singleton by all WebSocket
requests.
- AuthMiddleware: this middleware processes authentication requests and
responses, using the backend to parse the WebSocket messages, logging in
the current user if the authentication succeeds.
- AuthenticationTokenHandler: This handles authentication token creation
and usage requests. It is used both by the AuthMiddleware and by
handlers.WebSocketHandler in the ``on_message`` and ``on_juju_message``
methods.
"""
import datetime
import logging
import uuid
from tornado.ioloop import IOLoop
class User(object):
"""The current WebSocket user."""
def __init__(self, username='', password='', is_authenticated=False):
self.is_authenticated = is_authenticated
self.username = username
self.password = password
def __repr__(self):
if self.is_authenticated:
status = 'authenticated'
else:
status = 'not authenticated'
username = self.username or 'anonymous'
return '<User: {} ({})>'.format(username, status)
def __str__(self):
return self.username.encode('utf-8')
class AuthMiddleware(object):
"""Handle user authentication.
This class handles the process of authenticating the provided user using
the given auth backend. Note that, since the GUI just disconnects when the
user logs out, there is no need to handle the log out process.
"""
def __init__(self, user, backend, tokens, write_message):
self._user = user
self._backend = backend
self._tokens = tokens
self._write_message = write_message
self._request_ids = {}
def in_progress(self):
"""Return True if authentication is in progress, False otherwise.
"""
return bool(self._request_ids)
def process_request(self, data):
"""Parse the WebSocket data arriving from the client.
Start the authentication process if data represents a login request
performed by the GUI user.
"""
backend = self._backend
tokens = self._tokens
request_id = backend.get_request_id(data)
if request_id is not None:
credentials = None
is_token = False
if backend.request_is_login(data):
credentials = backend.get_credentials(data)
elif tokens.authentication_requested(data):
is_token = True
credentials = tokens.process_authentication_request(
data, self._write_message)
if credentials is None:
# This means that the tokens object handled the request.
return None
else:
# We need a "real" authentication request.
data = backend.make_request(request_id, *credentials)
if credentials is not None:
# Stashing credentials is a security risk. We currently deem
# this risk to be acceptably small. Even keeping an
# authenticated websocket in memory seems to be of a similar
# risk profile, and we cannot operate without that.
self._request_ids[request_id] = dict(
is_token=is_token,
username=credentials[0],
password=credentials[1])
return data
def process_response(self, data):
"""Parse the WebSocket data arriving from the Juju API server.
Complete the authentication process if data represents the response
to a login request previously initiated. Authenticate the user if the
authentication succeeded.
"""
request_id = self._backend.get_request_id(data)
if request_id in self._request_ids:
info = self._request_ids.pop(request_id)
user = self._user
logged_in = self._backend.login_succeeded(data)
if logged_in:
# Stashing credentials is a security risk. We currently deem
# this risk to be acceptably small. Even keeping an
# authenticated websocket in memory seems to be of a similar
# risk profile, and we cannot operate without that.
user.username = info['username']
user.password = info['password']
logging.info('auth: user {} logged in'.format(user))
user.is_authenticated = True
if info['is_token']:
data = self._tokens.process_authentication_response(
data, user)
return data
class GoBackend(object):
"""Authentication backend for the Juju Go API implementation.
A login request looks like the following:
{
'RequestId': 42,
'Type': 'Admin',
'Request': 'Login',
'Params': {'AuthTag': 'user-admin', 'Password': 'ADMIN-SECRET'},
}
Here is an example of a successful login response:
{'RequestId': 42, 'Response': {}}
A login failure response is like the following:
{
'RequestId': 42,
'Error': 'invalid entity name or password',
'ErrorCode': 'unauthorized access',
'Response': {},
}
"""
def get_request_id(self, data):
"""Return the request identifier associated with the provided data."""
return data.get('RequestId')
def request_is_login(self, data):
"""Return True if data represents a login request, False otherwise."""
params = data.get('Params', {})
return (
data.get('Type') == 'Admin' and
data.get('Request') == 'Login' and
'AuthTag' in params and
'Password' in params
)
def get_credentials(self, data):
"""Parse the provided login data and return username and password."""
params = data['Params']
return params['AuthTag'], params['Password']
def login_succeeded(self, data):
"""Return True if data represents a successful login, False otherwise.
"""
return 'Error' not in data
def make_request(self, request_id, username, password):
"""Create and return an authentication request."""
return dict(
RequestId=request_id,
Type='Admin',
Request='Login',
Params=dict(AuthTag=username, Password=password))
class PythonBackend(object):
"""Authentication backend for the Juju Python implementation.
A login request looks like the following:
{
'request_id': 42,
'op': 'login',
'user': 'admin',
'password': 'ADMIN-SECRET',
}
A successful login response includes these fields:
{
'request_id': 42,
'op': 'login',
'user': 'admin',
'password': 'ADMIN-SECRET',
'result': True,
}
A login failure response is like the following:
{
'request_id': 42,
'op': 'login',
'user': 'admin',
'password': 'ADMIN-SECRET',
'err': True,
}
"""
def get_request_id(self, data):
"""Return the request identifier associated with the provided data."""
return data.get('request_id')
def request_is_login(self, data):
"""Return True if data represents a login request, False otherwise."""
op = data.get('op')
return (op == 'login') and ('user' in data) and ('password' in data)
def get_credentials(self, data):
"""Parse the provided login data and return username and password."""
return data['user'], data['password']
def login_succeeded(self, data):
"""Return True if data represents a successful login, False otherwise.
"""
return data.get('result') and not data.get('err')
def make_request(self, request_id, username, password):
"""Create and return an authentication request."""
return dict(
request_id=request_id,
op='login',
user=username,
password=password)
def get_backend(apiversion):
"""Return the auth backend instance to use for the given API version."""
backend_class = {'go': GoBackend, 'python': PythonBackend}[apiversion]
return backend_class()
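# Illustrative usage (mirrors the GoBackend docstring above; the request id and
# credentials are made up):
# >>> backend = get_backend('go')
# >>> backend.make_request(42, 'user-admin', 'ADMIN-SECRET')
# {'RequestId': 42, 'Type': 'Admin', 'Request': 'Login',
#  'Params': {'AuthTag': 'user-admin', 'Password': 'ADMIN-SECRET'}}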
class AuthenticationTokenHandler(object):
"""Handle requests related to authentication tokens.
A token creation request looks like the following:
{
'RequestId': 42,
'Type': 'GUIToken',
'Request': 'Create',
'Params': {},
}
Here is an example of a successful token creation response.
{
'RequestId': 42,
'Response': {
'Token': 'TOKEN-STRING',
'Created': '2013-11-21T12:34:46.778866Z',
'Expires': '2013-11-21T12:36:46.778866Z'
}
}
If the user is not authenticated, the failure response will look like this.
{
'RequestId': 42,
'Error': 'tokens can only be created by authenticated users.',
'ErrorCode': 'unauthorized access',
'Response': {},
}
A token authentication request looks like the following:
{
'RequestId': 42,
'Type': 'GUIToken',
'Request': 'Login',
'Params': {'Token': 'TOKEN-STRING'},
}
Here is an example of a successful login response:
{
'RequestId': 42,
'Response': {'AuthTag': 'user-admin', 'Password': 'ADMIN-SECRET'}
}
A login failure response is like the following:
{
'RequestId': 42,
'Error': 'unknown, fulfilled, or expired token',
'ErrorCode': 'unauthorized access',
'Response': {},
}
Juju itself might return a failure response like the following, but this
would be difficult or impossible to trigger as of this writing:
{
'RequestId': 42,
'Error': 'invalid entity name or password',
'ErrorCode': 'unauthorized access',
'Response': {},
}
"""
def __init__(self, max_life=datetime.timedelta(minutes=2), io_loop=None):
self._max_life = max_life
if io_loop is None:
io_loop = IOLoop.current()
self._io_loop = io_loop
self._data = {}
def token_requested(self, data):
"""Does data represent a token creation request? True or False."""
return (
'RequestId' in data and
data.get('Type', None) == 'GUIToken' and
data.get('Request', None) == 'Create'
)
def process_token_request(self, data, user, write_message):
"""Create a single-use, time-expired token and send it back."""
if not user.is_authenticated:
write_message(dict(
RequestId=data['RequestId'],
Error='tokens can only be created by authenticated users.',
ErrorCode='unauthorized access',
Response={}))
return
token = uuid.uuid4().hex
def expire_token():
self._data.pop(token, None)
logging.info('auth: expired token {}'.format(token))
handle = self._io_loop.add_timeout(self._max_life, expire_token)
now = datetime.datetime.utcnow()
# Stashing these is a security risk. We currently deem this risk to
# be acceptably small. Even keeping an authenticated websocket in
# memory seems to be of a similar risk profile, and we cannot operate
# without that.
self._data[token] = dict(
username=user.username,
password=user.password,
handle=handle
)
write_message({
'RequestId': data['RequestId'],
'Response': {
'Token': token,
'Created': now.isoformat() + 'Z',
'Expires': (now + self._max_life).isoformat() + 'Z'
}
})
def authentication_requested(self, data):
"""Does data represent a token authentication request? True or False.
"""
params = data.get('Params', {})
return (
'RequestId' in data and
data.get('Type') == 'GUIToken' and
data.get('Request') == 'Login' and
'Token' in params
)
def process_authentication_request(self, data, write_message):
"""Get the credentials for the token, or send an error."""
token = data['Params']['Token']
credentials = self._data.pop(token, None)
if credentials is not None:
logging.info('auth: using token {}'.format(token))
self._io_loop.remove_timeout(credentials['handle'])
return credentials['username'], credentials['password']
else:
write_message({
'RequestId': data['RequestId'],
'Error': 'unknown, fulfilled, or expired token',
'ErrorCode': 'unauthorized access',
'Response': {},
})
# None is an explicit return marker to say "I handled this".
# It is returned by default.
def process_authentication_response(self, data, user):
"""Make a successful token authentication response.
This includes the username and password so that clients can then use
them. For instance, the GUI stashes them in session storage so that
reloading the page does not require logging in again."""
return {
'RequestId': data['RequestId'],
'Response': {'AuthTag': user.username, 'Password': user.password}
}
| apache-2.0 |