repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
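The schema above pairs each stored source file (the content column) with repository metadata and quality statistics. A minimal sketch of reading such a dump, assuming the Hugging Face datasets library and a placeholder dataset id (neither is specified by this file):

from datasets import load_dataset  # assumption: the dump is published as a Hugging Face dataset

ds = load_dataset("some-org/some-code-dump", split="train")  # placeholder id, assumption only
for row in ds.select(range(3)):
    print(row["repo_name"], row["path"], row["license"], row["size"])
    print(row["content"][:200])  # first 200 characters of the stored source file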
yunxliu/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_media-src_corss-origin_audio_blocked_ext.py | 30 | 3126 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "media-src http://www.w3.org; script-src 'self' 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Zhang, Zhiqiang <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_media-src_cross-origin_audio_blocked_ext</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="media-src http://www.w3.org; script-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<audio id="m"></audio>
<script>
var t = async_test(document.title);
var m = document.getElementById("m");
m.src = '""" + url1 + """/tests/csp/support/khronos/red-green.theora.ogv';
window.setTimeout(function() {
t.step(function() {
assert_true(m.currentSrc == "",
"audio.currentSrc should be empty after setting src attribute");
});
t.done();
}, 0);
</script>
</body>
</html> """
| bsd-3-clause | 3,516,255,804,027,830,000 | 43.028169 | 98 | 0.684261 | false |
royc1/gpdb | gpMgmt/bin/ext/figleaf/annotate_cover.py | 35 | 3696 | import figleaf
import os
import re
from annotate import read_exclude_patterns, filter_files, logger
def report_as_cover(coverage, exclude_patterns=[], ):
### now, output.
keys = coverage.keys()
info_dict = {}
for k in filter_files(keys):
try:
pyfile = open(k, 'rU')
lines = figleaf.get_lines(pyfile)
except IOError:
logger.warning('CANNOT OPEN: %s' % k)
continue
except KeyboardInterrupt:
raise
except Exception, e:
logger.error('ERROR: file %s, exception %s' % (pyfile, str(e)))
continue
# ok, got all the info. now annotate file ==> html.
covered = coverage[k]
pyfile = open(k)
(n_covered, n_lines, output) = make_cover_lines(lines, covered, pyfile)
try:
pcnt = n_covered * 100. / n_lines
except ZeroDivisionError:
pcnt = 100
info_dict[k] = (n_lines, n_covered, pcnt)
outfile = make_cover_filename(k)
try:
outfp = open(outfile, 'w')
outfp.write("\n".join(output))
outfp.write("\n")
outfp.close()
except IOError:
logger.warning('cannot open filename %s' % (outfile,))
continue
logger.info('reported on %s' % (outfile,))
### print a summary, too.
info_dict_items = info_dict.items()
def sort_by_pcnt(a, b):
a = a[1][2]
b = b[1][2]
return -cmp(a,b)
info_dict_items.sort(sort_by_pcnt)
logger.info('reported on %d file(s) total\n' % len(info_dict))
return len(info_dict)
def make_cover_lines(line_info, coverage_info, fp):
n_covered = n_lines = 0
output = []
for i, line in enumerate(fp):
is_covered = False
is_line = False
i += 1
if i in coverage_info:
is_covered = True
prefix = '+'
n_covered += 1
n_lines += 1
elif i in line_info:
prefix = '-'
is_line = True
n_lines += 1
else:
prefix = '0'
line = line.rstrip()
output.append(prefix + ' ' + line)
return (n_covered, n_lines, output)
def make_cover_filename(orig):
return orig + '.cover'
def main():
import sys
import logging
from optparse import OptionParser
###
option_parser = OptionParser()
option_parser.add_option('-x', '--exclude-patterns', action="store",
dest="exclude_patterns_file",
help="file containing regexp patterns to exclude")
option_parser.add_option('-q', '--quiet', action='store_true',
dest='quiet',
help="file containig regexp patterns of files to exclude from report")
option_parser.add_option('-D', '--debug', action='store_true',
dest='debug',
help='Show all debugging messages')
(options, args) = option_parser.parse_args()
if options.quiet:
logging.disable(logging.DEBUG)
if options.debug:
logger.setLevel(logging.DEBUG)
### load
if not args:
args = ['.figleaf']
coverage = {}
for filename in args:
logger.debug("loading coverage info from '%s'\n" % (filename,))
d = figleaf.read_coverage(filename)
coverage = figleaf.combine_coverage(coverage, d)
if not coverage:
logger.warning('EXITING -- no coverage info!\n')
sys.exit(-1)
exclude = read_exclude_patterns(options.exclude_patterns_file)
report_as_cover(coverage, exclude)
| apache-2.0 | 1,517,335,854,399,543,300 | 24.846154 | 79 | 0.538149 | false |
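A minimal sketch of the annotation convention implemented by make_cover_lines() in the figleaf file above: each output line is prefixed with '+' (covered), '-' (executable but missed) or '0' (non-executable). The toy source text and line sets below are invented for this example.

import io

toy_source = u"a = 1\nif a:\n    b = 2\nelse:\n    b = 3\n"
line_info = set([1, 2, 3, 5])      # executable lines, as figleaf.get_lines() would report them
coverage_info = set([1, 2, 3])     # lines that actually ran

n_covered, n_lines, output = make_cover_lines(line_info, coverage_info,
                                              io.StringIO(toy_source))
print("\n".join(output))           # '+ a = 1', '+ if a:', '+     b = 2', '0 else:', '-     b = 3'
print("%d of %d executable lines covered" % (n_covered, n_lines))   # 3 of 4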
burzillibus/RobHome | venv/lib/python2.7/site-packages/docutils/__init__.py | 6 | 8968 | # $Id: __init__.py 8147 2017-08-03 09:01:16Z grubert $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This is the Docutils (Python Documentation Utilities) package.
Package Structure
=================
Modules:
- __init__.py: Contains component base classes, exception classes, and
Docutils version information.
- core.py: Contains the ``Publisher`` class and ``publish_*()`` convenience
functions.
- frontend.py: Runtime settings (command-line interface, configuration files)
processing, for Docutils front-ends.
- io.py: Provides a uniform API for low-level input and output.
- nodes.py: Docutils document tree (doctree) node class library.
- statemachine.py: A finite state machine specialized for
regular-expression-based text filters.
Subpackages:
- languages: Language-specific mappings of terms.
- parsers: Syntax-specific input parser modules or packages.
- readers: Context-specific input handlers which understand the data
source and manage a parser.
- transforms: Modules used by readers and writers to modify DPS
doctrees.
- utils: Contains the ``Reporter`` system warning class and miscellaneous
utilities used by readers, writers, and transforms.
utils/urischemes.py: Contains a complete mapping of known URI addressing
scheme names to descriptions.
- utils/math: Contains functions for conversion of mathematical notation
between different formats (LaTeX, MathML, text, ...).
- writers: Format-specific output translators.
"""
import sys
__docformat__ = 'reStructuredText'
__version__ = '0.14'
"""Docutils version identifier (complies with PEP 440)::
major.minor[.micro][releaselevel[serial]][.dev]
* The major number will be bumped when the project is feature-complete, and
later if there is a major change in the design or API.
* The minor number is bumped whenever there are new features.
* The micro number is bumped for bug-fix releases. Omitted if micro=0.
* The releaselevel identifier is used for pre-releases, one of 'a' (alpha),
'b' (beta), or 'rc' (release candidate). Omitted for final releases.
* The serial release number identifies prereleases; omitted if 0.
* The '.dev' suffix indicates active development, not a release, before the
version indicated.
For version comparison operations, use `__version_info__`
rather than parsing the text of `__version__`.
"""
# workaround for Python < 2.6:
__version_info__ = (0, 14, 0, 'final', 0, True)
# To add in Docutils 0.15, replacing the line above:
"""
from collections import namedtuple
VersionInfo = namedtuple(
'VersionInfo', 'major minor micro releaselevel serial release')
__version_info__ = VersionInfo(
major=0,
minor=15,
micro=0,
releaselevel='alpha', # development status:
# one of 'alpha', 'beta', 'candidate', 'final'
serial=0, # pre-release number (0 for final releases)
release=False # True for official releases and pre-releases
)
Comprehensive version information tuple. Can be used to test for a
minimally required version, e.g. ::
if __version_info__ >= (0, 13, 0, 'candidate', 2, True)
or in a self-documenting way like ::
if __version_info__ >= docutils.VersionInfo(
major=0, minor=13, micro=0,
releaselevel='candidate', serial=2, release=True)
"""
__version_details__ = ''
"""Optional extra version details (e.g. 'snapshot 2005-05-29, r3410').
(For development and release status see `__version_info__`.)
"""
class ApplicationError(StandardError):
# Workaround:
# In Python < 2.6, unicode(<exception instance>) calls `str` on the
# arg and therefore, e.g., unicode(StandardError(u'\u234')) fails
# with UnicodeDecodeError.
if sys.version_info < (2,6):
def __unicode__(self):
return u', '.join(self.args)
class DataError(ApplicationError): pass
class SettingsSpec:
"""
Runtime setting specification base class.
SettingsSpec subclass objects used by `docutils.frontend.OptionParser`.
"""
settings_spec = ()
"""Runtime settings specification. Override in subclasses.
Defines runtime settings and associated command-line options, as used by
`docutils.frontend.OptionParser`. This is a tuple of:
- Option group title (string or `None` which implies no group, just a list
of single options).
- Description (string or `None`).
- A sequence of option tuples. Each consists of:
- Help text (string)
- List of option strings (e.g. ``['-Q', '--quux']``).
- Dictionary of keyword arguments sent to the OptionParser/OptionGroup
``add_option`` method.
Runtime setting names are derived implicitly from long option names
('--a-setting' becomes ``settings.a_setting``) or explicitly from the
'dest' keyword argument.
Most settings will also have a 'validator' keyword & function. The
validator function validates setting values (from configuration files
and command-line option arguments) and converts them to appropriate
types. For example, the ``docutils.frontend.validate_boolean``
function, **required by all boolean settings**, converts true values
('1', 'on', 'yes', and 'true') to 1 and false values ('0', 'off',
'no', 'false', and '') to 0. Validators need only be set once per
setting. See the `docutils.frontend.validate_*` functions.
See the optparse docs for more details.
- More triples of group title, description, options, as many times as
needed. Thus, `settings_spec` tuples can be simply concatenated.
"""
settings_defaults = None
"""A dictionary of defaults for settings not in `settings_spec` (internal
settings, intended to be inaccessible by command-line and config file).
Override in subclasses."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
defined in other components. Override in subclasses."""
relative_path_settings = ()
"""Settings containing filesystem paths. Override in subclasses.
Settings listed here are to be interpreted relative to the current working
directory."""
config_section = None
"""The name of the config file section specific to this component
(lowercase, no brackets). Override in subclasses."""
config_section_dependencies = None
"""A list of names of config file sections that are to be applied before
`config_section`, in order (from general to specific). In other words,
the settings in `config_section` are to be overlaid on top of the settings
from these sections. The "general" section is assumed implicitly.
Override in subclasses."""
class TransformSpec:
"""
Runtime transform specification base class.
TransformSpec subclass objects used by `docutils.transforms.Transformer`.
"""
def get_transforms(self):
"""Transforms required by this class. Override in subclasses."""
if self.default_transforms != ():
import warnings
warnings.warn('default_transforms attribute deprecated.\n'
'Use get_transforms() method instead.',
DeprecationWarning)
return list(self.default_transforms)
return []
# Deprecated; for compatibility.
default_transforms = ()
unknown_reference_resolvers = ()
"""List of functions to try to resolve unknown references. Unknown
references have a 'refname' attribute which doesn't correspond to any
target in the document. Called when the transforms in
`docutils.transforms.references` are unable to find a correct target. The
list should contain functions which will try to resolve unknown
references, with the following signature::
def reference_resolver(node):
'''Returns boolean: true if resolved, false if not.'''
If the function is able to resolve the reference, it should also remove
the 'refname' attribute and mark the node as resolved::
del node['refname']
node.resolved = 1
Each function must have a "priority" attribute which will affect the order
the unknown_reference_resolvers are run::
reference_resolver.priority = 100
Override in subclasses."""
class Component(SettingsSpec, TransformSpec):
"""Base class for Docutils components."""
component_type = None
"""Name of the component type ('reader', 'parser', 'writer'). Override in
subclasses."""
supported = ()
"""Names for this component. Override in subclasses."""
def supports(self, format):
"""
Is `format` supported by this component?
To be used by transforms to ask the dependent component if it supports
a certain input context or output format.
"""
return format in self.supported
| mit | 6,584,559,247,334,019,000 | 33.229008 | 78 | 0.689451 | false |
krishnabangalore/Webinos-Platform | experiments/windowsInstaller/CreateDis.py | 3 | 2228 | import os
import sys
import shutil
import js2c
def GetSourceFiles(webinos_root):
jsFiles = []
jsonFiles = []
packageFiles = []
nodeFiles = []
nodeModuleDirs = []
for root, dirs, files in os.walk(webinos_root):
if not 'node_modules' in root.lower() and not "wrt" in root.lower() and not "test" in root.lower() and not "vb-sim" in root.lower():
for f in files:
if f.endswith(".js"):
jsFiles.append(os.path.realpath(os.path.join(root,f)))
# elif f.lower() == "package.json":
# jsFiles.append(os.path.realpath(os.path.join(root,f)))
elif f.endswith(".json"):
jsFiles.append(os.path.realpath(os.path.join(root,f)))
elif f.endswith(".node"):
nodeFiles.append(os.path.realpath(os.path.join(root,f))[len(webinos_root):])
for d in dirs:
if d.lower() == 'node_modules':
nodeModuleDirs.append(os.path.realpath(os.path.join(root,d))[len(webinos_root):])
return [jsFiles, jsonFiles, packageFiles, nodeFiles, nodeModuleDirs]
def CopyBinAndModules(nodeFiles,nodeModuleDirs,webinos_root,destination):
# Clear previous contents
shutil.rmtree(destination, True)
os.makedirs(destination+'/node_modules')
os.makedirs(destination+'/bin')
for modDir in nodeModuleDirs:
modulesRootDir = webinos_root+modDir
for nMod in os.listdir(modulesRootDir):
if os.path.isdir(os.path.join(modulesRootDir,nMod)):
shutil.copytree(os.path.join(modulesRootDir,nMod),os.path.join(destination,'node_modules',nMod))
else:
shutil.copy(os.path.join(modulesRootDir,nMod),os.path.join(destination,'node_modules'))
for nFile in nodeFiles:
shutil.copy(webinos_root+'/'+nFile, destination +'/bin')
def main():
webinos_root = os.path.realpath(os.path.join(os.path.dirname(__file__),"../../webinos"))
destinationDir = os.path.realpath(os.path.join(os.path.dirname(__file__),"WebinosDist"))
source_files = GetSourceFiles(webinos_root)
CopyBinAndModules(source_files[3],source_files[4],webinos_root,destinationDir)
# print source_files
js2c.JS2C(source_files[0],webinos_root, source_files[3], ["WebinosDist\webinos_natives.h"])
if __name__ == "__main__":
main() | apache-2.0 | -2,389,168,773,531,733,500 | 41.056604 | 136 | 0.671903 | false |
JuniorJPDJ/pyChomikBox | ChomikBox/utils/FileTransferProgressBar.py | 1 | 3099 | import time
import sys
def sizeof_fmt(num, unit='B'):
# source: http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
for uprexif in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "{:3.2f} {}{}".format(num, uprexif, unit)
num /= 1024.0
return "{:3.2f} Yi{}".format(num, unit)
output = sys.stderr
progress_format = '{n} [{b}] {p:3.1f}% ({d}/{a}) {s}'
class FileTransferProgressBar(object):
# inspired by clint.textui.progress.Bar
def __init__(self, filesize, name='', width=32, empty_char=' ', filled_char='#', hide=None, speed_update=0.2,
bar_update=0.05, progress_format=progress_format):
self.name, self.filesize, self.width, self.ec, self.fc = name, filesize, width, empty_char, filled_char
self.speed_update, self.bar_update, self.progress_format = speed_update, bar_update, progress_format
if hide is None:
try:
self.hide = not output.isatty()
except AttributeError:
self.hide = True
else:
self.hide = hide
self.last_progress = 0
self.last_time = time.time()
self.last_speed_update = self.last_time
self.start_time = self.last_time
self.last_speed_progress = 0
self.last_speed = 0
self.max_bar_size = 0
def show(self, progress):
if time.time() - self.last_time > self.bar_update:
self.last_time = time.time()
self.last_progress = progress
if self.last_time - self.last_speed_update > self.speed_update:
self.last_speed = (self.last_speed_progress - progress) / float(self.last_speed_update - self.last_time)
self.last_speed_update = self.last_time
self.last_speed_progress = progress
status = self.width * progress // self.filesize
percent = float(progress * 100) / self.filesize
bar = self.progress_format.format(n=self.name, b=self.fc * status + self.ec * (self.width - status),
p=percent, d=sizeof_fmt(progress), a=sizeof_fmt(self.filesize),
s=sizeof_fmt(self.last_speed) + '/s')
max_bar = self.max_bar_size
self.max_bar_size = max(len(bar), self.max_bar_size)
bar = bar + (' ' * (max_bar - len(bar))) + '\r' # workaround for ghosts
output.write(bar)
output.flush()
def done(self):
speed = self.filesize / float(time.time() - self.start_time)
bar = self.progress_format.format(n=self.name, b=self.fc * self.width, p=100, d=sizeof_fmt(self.filesize),
a=sizeof_fmt(self.filesize), s=sizeof_fmt(speed) + '/s')
max_bar = self.max_bar_size
self.max_bar_size = max(len(bar), self.max_bar_size)
bar = bar + (' ' * (max_bar - len(bar))) + '\r'
output.write(bar)
output.write('\n')
output.flush()
| lgpl-3.0 | 2,277,270,958,108,844,000 | 45.253731 | 120 | 0.558567 | false |
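A small usage sketch for the FileTransferProgressBar class above; the file name, size and chunk size are invented for illustration.

bar = FileTransferProgressBar(1024 * 1024, name='example.bin')
sent = 0
while sent < 1024 * 1024:
    sent += 64 * 1024               # pretend a 64 KiB chunk was just transferred
    bar.show(min(sent, 1024 * 1024))
bar.done()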
alfa-addon/addon | plugin.video.alfa/core/scrapertoolsV2.py | 1 | 11697 | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Scraper tools v2 for reading and processing web elements
# --------------------------------------------------------------------------------
#from future import standard_library
#standard_library.install_aliases()
#from builtins import str
#from builtins import chr
import sys
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    import urllib.parse as urlparse # Very slow in PY2; native in PY3
else:
    import urlparse # Use the native PY2 module, which is faster
import re
import time
from core.entities import html5
from platformcode import logger
def printMatches(matches):
i = 0
for match in matches:
logger.info("%d %s" % (i, match))
i = i + 1
def find_single_match(data, patron, index=0):
try:
matches = re.findall(patron, data, flags=re.DOTALL)
return matches[index]
except:
return ""
# Parses a string and extracts multiple matches using regular expressions
def find_multiple_matches(text, pattern):
return re.findall(pattern, text, re.DOTALL)
# Converts HTML entity codes such as "&ntilde;" into the corresponding UTF-8 unicode character ("ñ")
def decodeHtmlentities(data):
entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8})(;?)")
def substitute_entity(match):
ent = match.group(2) + match.group(3)
res = ""
while not ent in html5 and not ent.endswith(";") and match.group(1) != "#":
            # Exception for when '&' is used as an argument in URLs contained in the data
try:
res = ent[-1] + res
ent = ent[:-1]
except:
break
if match.group(1) == "#":
ent = unichr(int(ent.replace(";", "")))
return ent.encode('utf-8')
else:
cp = html5.get(ent)
if cp:
return cp.decode("unicode-escape").encode('utf-8') + res
else:
return match.group()
return entity_re.subn(substitute_entity, data)[0]
def htmlclean(cadena):
cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
cadena = cadena.replace("<center>", "")
cadena = cadena.replace("</center>", "")
cadena = cadena.replace("<cite>", "")
cadena = cadena.replace("</cite>", "")
cadena = cadena.replace("<em>", "")
cadena = cadena.replace("</em>", "")
cadena = cadena.replace("<u>", "")
cadena = cadena.replace("</u>", "")
cadena = cadena.replace("<li>", "")
cadena = cadena.replace("</li>", "")
cadena = cadena.replace("<turl>", "")
cadena = cadena.replace("</tbody>", "")
cadena = cadena.replace("<tr>", "")
cadena = cadena.replace("</tr>", "")
cadena = cadena.replace("<![CDATA[", "")
cadena = cadena.replace("<wbr>", "")
cadena = cadena.replace("<Br />", " ")
cadena = cadena.replace("<BR />", " ")
cadena = cadena.replace("<Br>", " ")
cadena = re.compile("<br[^>]*>", re.DOTALL).sub(" ", cadena)
cadena = re.compile("<script.*?</script>", re.DOTALL).sub("", cadena)
cadena = re.compile("<option[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</option>", "")
cadena = re.compile("<button[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</button>", "")
cadena = re.compile("<i[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</iframe>", "")
cadena = cadena.replace("</i>", "")
cadena = re.compile("<table[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</table>", "")
cadena = re.compile("<td[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</td>", "")
cadena = re.compile("<div[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</div>", "")
cadena = re.compile("<dd[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</dd>", "")
cadena = re.compile("<b[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</b>", "")
cadena = re.compile("<font[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</font>", "")
cadena = re.compile("<strong[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</strong>", "")
cadena = re.compile("<small[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</small>", "")
cadena = re.compile("<span[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</span>", "")
cadena = re.compile("<a[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</a>", "")
cadena = re.compile("<p[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</p>", "")
cadena = re.compile("<ul[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</ul>", "")
cadena = re.compile("<h1[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</h1>", "")
cadena = re.compile("<h2[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</h2>", "")
cadena = re.compile("<h3[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</h3>", "")
cadena = re.compile("<h4[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</h4>", "")
cadena = re.compile("<!--[^-]+-->", re.DOTALL).sub("", cadena)
cadena = re.compile("<img[^>]*>", re.DOTALL).sub("", cadena)
cadena = re.compile("<object[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</object>", "")
cadena = re.compile("<param[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</param>", "")
cadena = re.compile("<embed[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</embed>", "")
cadena = re.compile("<title[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("</title>", "")
cadena = re.compile("<link[^>]*>", re.DOTALL).sub("", cadena)
cadena = cadena.replace("\t", "")
# cadena = entityunescape(cadena)
return cadena
def slugify(title):
# print title
    # Replace accented characters and ñ
title = title.replace("Á", "a")
title = title.replace("É", "e")
title = title.replace("Í", "i")
title = title.replace("Ó", "o")
title = title.replace("Ú", "u")
title = title.replace("á", "a")
title = title.replace("é", "e")
title = title.replace("í", "i")
title = title.replace("ó", "o")
title = title.replace("ú", "u")
title = title.replace("À", "a")
title = title.replace("È", "e")
title = title.replace("Ì", "i")
title = title.replace("Ò", "o")
title = title.replace("Ù", "u")
title = title.replace("à", "a")
title = title.replace("è", "e")
title = title.replace("ì", "i")
title = title.replace("ò", "o")
title = title.replace("ù", "u")
title = title.replace("ç", "c")
title = title.replace("Ç", "C")
title = title.replace("Ñ", "n")
title = title.replace("ñ", "n")
title = title.replace("/", "-")
title = title.replace("&", "&")
    # Convert to lowercase
    title = title.lower().strip()
    # Remove invalid characters
    validchars = "abcdefghijklmnopqrstuvwxyz1234567890- "
    title = ''.join(c for c in title if c in validchars)
    # Collapse duplicate whitespace and line breaks
    title = re.compile("\s+", re.DOTALL).sub(" ", title)
    # Replace whitespace with hyphens
    title = re.compile("\s", re.DOTALL).sub("-", title.strip())
    # Collapse duplicate hyphens
    title = re.compile("\-+", re.DOTALL).sub("-", title)
    # Handle special cases
if title.startswith("-"):
title = title[1:]
if title == "":
title = "-" + str(time.time())
return title
def remove_htmltags(string):
return re.sub('<[^<]+?>', '', string)
def remove_show_from_title(title, show):
# print slugify(title)+" == "+slugify(show)
    # Remove the show name from the title
if slugify(title).startswith(slugify(show)):
        # Convert to unicode first, or the encoding is lost
title = unicode(title, "utf-8", "replace")
show = unicode(show, "utf-8", "replace")
title = title[len(show):].strip()
if title.startswith("-"):
title = title[1:].strip()
if title == "":
title = str(time.time())
        # Back to utf-8
title = title.encode("utf-8", "ignore")
show = show.encode("utf-8", "ignore")
return title
# scrapertools.get_filename_from_url(media_url)[-4:]
def get_filename_from_url(url):
parsed_url = urlparse.urlparse(url)
try:
filename = parsed_url.path
except:
        # If this fails, the parsed_url implementation does not recognize attributes such as "path"
if len(parsed_url) >= 4:
filename = parsed_url[2]
else:
filename = ""
if "/" in filename:
filename = filename.split("/")[-1]
return filename
def get_domain_from_url(url):
parsed_url = urlparse.urlparse(url)
try:
filename = parsed_url.netloc
except:
        # If this fails, the parsed_url implementation does not recognize attributes such as "path"
if len(parsed_url) >= 4:
filename = parsed_url[1]
else:
filename = ""
return filename
def get_season_and_episode(title):
"""
Retorna el numero de temporada y de episodio en formato "1x01" obtenido del titulo de un episodio
Ejemplos de diferentes valores para title y su valor devuelto:
"serie 101x1.strm", "s101e1.avi", "t101e1.avi" -> '101x01'
"Name TvShow 1x6.avi" -> '1x06'
"Temp 3 episodio 2.avi" -> '3x02'
"Alcantara season 13 episodie 12.avi" -> '13x12'
"Temp1 capitulo 14" -> '1x14'
"Temporada 1: El origen Episodio 9" -> '' (entre el numero de temporada y los episodios no puede haber otro texto)
"Episodio 25: titulo episodio" -> '' (no existe el numero de temporada)
"Serie X Temporada 1" -> '' (no existe el numero del episodio)
@type title: str
@param title: titulo del episodio de una serie
@rtype: str
@return: Numero de temporada y episodio en formato "1x01" o cadena vacia si no se han encontrado
"""
filename = ""
patrons = ["(\d+)x(\d+)", "(?:s|t)(\d+)e(\d+)",
"(?:season|temp\w*)\s*(\d+)\s*(?:capitulo|epi\w*)\s*(\d+)"]
for patron in patrons:
try:
matches = re.compile(patron, re.I).search(title)
if matches:
filename = matches.group(1) + "x" + matches.group(2).zfill(2)
break
except:
pass
logger.info("'" + title + "' -> '" + filename + "'")
return filename
def get_sha1(cadena):
try:
import hashlib
devuelve = hashlib.sha1(cadena).hexdigest()
except:
import sha
import binascii
devuelve = binascii.hexlify(sha.new(cadena).digest())
return devuelve
def get_md5(cadena):
try:
import hashlib
devuelve = hashlib.md5(cadena).hexdigest()
except:
import md5
import binascii
devuelve = binascii.hexlify(md5.new(cadena).digest())
return devuelve
| gpl-3.0 | -8,660,735,414,594,733,000 | 31.127841 | 122 | 0.537775 | false |
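A short sketch of the two matching helpers defined in the file above; the HTML snippet is made up.

data = '<a href="http://example.com/page">Page</a> <a href="http://example.com/other">Other</a>'
first = find_single_match(data, 'href="([^"]+)"')         # -> 'http://example.com/page'
links = find_multiple_matches(data, 'href="([^"]+)"')     # -> both URLs, in document order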
lanfker/tdma_imac | .waf-1.6.7-0a94702c61504c487a251b8d0a04ca9a/waflib/Tools/cxx.py | 3 | 1250 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import TaskGen,Task,Utils
from waflib.Tools import c_preproc
from waflib.Tools.ccroot import link_task,stlink_task
def cxx_hook(self,node):
return self.create_compiled_task('cxx',node)
TaskGen.extension('.cpp','.cc','.cxx','.C','.c++')(cxx_hook)
if not'.c'in TaskGen.task_gen.mappings:
TaskGen.task_gen.mappings['.c']=TaskGen.task_gen.mappings['.cpp']
class cxx(Task.Task):
run_str='${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CPPFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}'
vars=['CXXDEPS']
ext_in=['.h']
scan=c_preproc.scan
class cxxprogram(link_task):
run_str='${LINK_CXX} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LINKFLAGS}'
vars=['LINKDEPS']
ext_out=['.bin']
inst_to='${BINDIR}'
chmod=Utils.O755
class cxxshlib(cxxprogram):
inst_to='${LIBDIR}'
class cxxstlib(stlink_task):
pass
| gpl-2.0 | -8,412,443,015,879,089,000 | 45.296296 | 298 | 0.7016 | false |
GenericStudent/home-assistant | tests/components/alert/test_init.py | 8 | 11457 | """The tests for the Alert component."""
# pylint: disable=protected-access
from copy import deepcopy
import unittest
import homeassistant.components.alert as alert
from homeassistant.components.alert import DOMAIN
import homeassistant.components.notify as notify
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ENTITY_ID,
CONF_NAME,
CONF_STATE,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
NAME = "alert_test"
DONE_MESSAGE = "alert_gone"
NOTIFIER = "test"
TEMPLATE = "{{ states.sensor.test.entity_id }}"
TEST_ENTITY = "sensor.test"
TITLE = "{{ states.sensor.test.entity_id }}"
TEST_TITLE = "sensor.test"
TEST_DATA = {"data": {"inline_keyboard": ["Close garage:/close_garage"]}}
TEST_CONFIG = {
alert.DOMAIN: {
NAME: {
CONF_NAME: NAME,
alert.CONF_DONE_MESSAGE: DONE_MESSAGE,
CONF_ENTITY_ID: TEST_ENTITY,
CONF_STATE: STATE_ON,
alert.CONF_REPEAT: 30,
alert.CONF_SKIP_FIRST: False,
alert.CONF_NOTIFIERS: [NOTIFIER],
alert.CONF_TITLE: TITLE,
alert.CONF_DATA: {},
}
}
}
TEST_NOACK = [
NAME,
NAME,
"sensor.test",
STATE_ON,
[30],
False,
None,
None,
NOTIFIER,
False,
None,
None,
]
ENTITY_ID = f"{alert.DOMAIN}.{NAME}"
def turn_on(hass, entity_id):
"""Reset the alert.
This is a legacy helper method. Do not use it for new tests.
"""
hass.add_job(async_turn_on, hass, entity_id)
@callback
def async_turn_on(hass, entity_id):
"""Async reset the alert.
This is a legacy helper method. Do not use it for new tests.
"""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data))
def turn_off(hass, entity_id):
"""Acknowledge alert.
This is a legacy helper method. Do not use it for new tests.
"""
hass.add_job(async_turn_off, hass, entity_id)
@callback
def async_turn_off(hass, entity_id):
"""Async acknowledge the alert.
This is a legacy helper method. Do not use it for new tests.
"""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data))
def toggle(hass, entity_id):
"""Toggle acknowledgment of alert.
This is a legacy helper method. Do not use it for new tests.
"""
hass.add_job(async_toggle, hass, entity_id)
@callback
def async_toggle(hass, entity_id):
"""Async toggle acknowledgment of alert.
This is a legacy helper method. Do not use it for new tests.
"""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data))
# pylint: disable=invalid-name
class TestAlert(unittest.TestCase):
"""Test the alert module."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self._setup_notify()
self.addCleanup(self.hass.stop)
def _setup_notify(self):
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.services.register(notify.DOMAIN, NOTIFIER, record_event)
return events
def test_is_on(self):
"""Test is_on method."""
self.hass.states.set(ENTITY_ID, STATE_ON)
self.hass.block_till_done()
assert alert.is_on(self.hass, ENTITY_ID)
self.hass.states.set(ENTITY_ID, STATE_OFF)
self.hass.block_till_done()
assert not alert.is_on(self.hass, ENTITY_ID)
def test_setup(self):
"""Test setup method."""
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
assert STATE_IDLE == self.hass.states.get(ENTITY_ID).state
def test_fire(self):
"""Test the alert firing."""
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
assert STATE_ON == self.hass.states.get(ENTITY_ID).state
def test_silence(self):
"""Test silencing the alert."""
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
turn_off(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert STATE_OFF == self.hass.states.get(ENTITY_ID).state
# alert should not be silenced on next fire
self.hass.states.set("sensor.test", STATE_OFF)
self.hass.block_till_done()
assert STATE_IDLE == self.hass.states.get(ENTITY_ID).state
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
assert STATE_ON == self.hass.states.get(ENTITY_ID).state
def test_reset(self):
"""Test resetting the alert."""
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
turn_off(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert STATE_OFF == self.hass.states.get(ENTITY_ID).state
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert STATE_ON == self.hass.states.get(ENTITY_ID).state
def test_toggle(self):
"""Test toggling alert."""
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
assert STATE_ON == self.hass.states.get(ENTITY_ID).state
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert STATE_OFF == self.hass.states.get(ENTITY_ID).state
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert STATE_ON == self.hass.states.get(ENTITY_ID).state
def test_notification_no_done_message(self):
"""Test notifications."""
events = []
config = deepcopy(TEST_CONFIG)
del config[alert.DOMAIN][NAME][alert.CONF_DONE_MESSAGE]
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.services.register(notify.DOMAIN, NOTIFIER, record_event)
assert setup_component(self.hass, alert.DOMAIN, config)
assert len(events) == 0
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
assert len(events) == 1
self.hass.states.set("sensor.test", STATE_OFF)
self.hass.block_till_done()
assert len(events) == 1
def test_notification(self):
"""Test notifications."""
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.services.register(notify.DOMAIN, NOTIFIER, record_event)
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
assert len(events) == 0
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
assert len(events) == 1
self.hass.states.set("sensor.test", STATE_OFF)
self.hass.block_till_done()
assert len(events) == 2
def test_sending_non_templated_notification(self):
"""Test notifications."""
events = self._setup_notify()
assert setup_component(self.hass, alert.DOMAIN, TEST_CONFIG)
self.hass.states.set(TEST_ENTITY, STATE_ON)
self.hass.block_till_done()
self.assertEqual(1, len(events))
last_event = events[-1]
self.assertEqual(last_event.data[notify.ATTR_MESSAGE], NAME)
def test_sending_templated_notification(self):
"""Test templated notification."""
events = self._setup_notify()
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_ALERT_MESSAGE] = TEMPLATE
assert setup_component(self.hass, alert.DOMAIN, config)
self.hass.states.set(TEST_ENTITY, STATE_ON)
self.hass.block_till_done()
self.assertEqual(1, len(events))
last_event = events[-1]
self.assertEqual(last_event.data[notify.ATTR_MESSAGE], TEST_ENTITY)
def test_sending_templated_done_notification(self):
"""Test templated notification."""
events = self._setup_notify()
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_DONE_MESSAGE] = TEMPLATE
assert setup_component(self.hass, alert.DOMAIN, config)
self.hass.states.set(TEST_ENTITY, STATE_ON)
self.hass.block_till_done()
self.hass.states.set(TEST_ENTITY, STATE_OFF)
self.hass.block_till_done()
self.assertEqual(2, len(events))
last_event = events[-1]
self.assertEqual(last_event.data[notify.ATTR_MESSAGE], TEST_ENTITY)
def test_sending_titled_notification(self):
"""Test notifications."""
events = self._setup_notify()
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_TITLE] = TITLE
assert setup_component(self.hass, alert.DOMAIN, config)
self.hass.states.set(TEST_ENTITY, STATE_ON)
self.hass.block_till_done()
self.assertEqual(1, len(events))
last_event = events[-1]
self.assertEqual(last_event.data[notify.ATTR_TITLE], TEST_TITLE)
def test_sending_data_notification(self):
"""Test notifications."""
events = self._setup_notify()
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_DATA] = TEST_DATA
assert setup_component(self.hass, alert.DOMAIN, config)
self.hass.states.set(TEST_ENTITY, STATE_ON)
self.hass.block_till_done()
self.assertEqual(1, len(events))
last_event = events[-1]
self.assertEqual(last_event.data[notify.ATTR_DATA], TEST_DATA)
def test_skipfirst(self):
"""Test skipping first notification."""
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_SKIP_FIRST] = True
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.services.register(notify.DOMAIN, NOTIFIER, record_event)
assert setup_component(self.hass, alert.DOMAIN, config)
assert len(events) == 0
self.hass.states.set("sensor.test", STATE_ON)
self.hass.block_till_done()
assert len(events) == 0
def test_noack(self):
"""Test no ack feature."""
entity = alert.Alert(self.hass, *TEST_NOACK)
self.hass.add_job(entity.begin_alerting)
self.hass.block_till_done()
def test_done_message_state_tracker_reset_on_cancel(self):
"""Test that the done message is reset when canceled."""
entity = alert.Alert(self.hass, *TEST_NOACK)
entity._cancel = lambda *args: None
assert entity._send_done_message is False
entity._send_done_message = True
self.hass.add_job(entity.end_alerting)
self.hass.block_till_done()
assert entity._send_done_message is False
| apache-2.0 | -1,266,166,287,685,687,000 | 31.273239 | 84 | 0.628 | false |
Cinntax/home-assistant | tests/components/config/test_area_registry.py | 4 | 4008 | """Test area_registry API."""
import pytest
from homeassistant.components.config import area_registry
from tests.common import mock_area_registry
@pytest.fixture
def client(hass, hass_ws_client):
"""Fixture that can interact with the config manager API."""
hass.loop.run_until_complete(area_registry.async_setup(hass))
yield hass.loop.run_until_complete(hass_ws_client(hass))
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_area_registry(hass)
async def test_list_areas(hass, client, registry):
"""Test list entries."""
registry.async_create("mock 1")
registry.async_create("mock 2")
await client.send_json({"id": 1, "type": "config/area_registry/list"})
msg = await client.receive_json()
assert len(msg["result"]) == len(registry.areas)
async def test_create_area(hass, client, registry):
"""Test create entry."""
await client.send_json(
{"id": 1, "name": "mock", "type": "config/area_registry/create"}
)
msg = await client.receive_json()
assert "mock" in msg["result"]["name"]
assert len(registry.areas) == 1
async def test_create_area_with_name_already_in_use(hass, client, registry):
"""Test create entry that should fail."""
registry.async_create("mock")
await client.send_json(
{"id": 1, "name": "mock", "type": "config/area_registry/create"}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == "invalid_info"
assert msg["error"]["message"] == "Name is already in use"
assert len(registry.areas) == 1
async def test_delete_area(hass, client, registry):
"""Test delete entry."""
area = registry.async_create("mock")
await client.send_json(
{"id": 1, "area_id": area.id, "type": "config/area_registry/delete"}
)
msg = await client.receive_json()
assert msg["success"]
assert not registry.areas
async def test_delete_non_existing_area(hass, client, registry):
"""Test delete entry that should fail."""
registry.async_create("mock")
await client.send_json(
{"id": 1, "area_id": "", "type": "config/area_registry/delete"}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == "invalid_info"
assert msg["error"]["message"] == "Area ID doesn't exist"
assert len(registry.areas) == 1
async def test_update_area(hass, client, registry):
"""Test update entry."""
area = registry.async_create("mock 1")
await client.send_json(
{
"id": 1,
"area_id": area.id,
"name": "mock 2",
"type": "config/area_registry/update",
}
)
msg = await client.receive_json()
assert msg["result"]["area_id"] == area.id
assert msg["result"]["name"] == "mock 2"
assert len(registry.areas) == 1
async def test_update_area_with_same_name(hass, client, registry):
"""Test update entry."""
area = registry.async_create("mock 1")
await client.send_json(
{
"id": 1,
"area_id": area.id,
"name": "mock 1",
"type": "config/area_registry/update",
}
)
msg = await client.receive_json()
assert msg["result"]["area_id"] == area.id
assert msg["result"]["name"] == "mock 1"
assert len(registry.areas) == 1
async def test_update_area_with_name_already_in_use(hass, client, registry):
"""Test update entry."""
area = registry.async_create("mock 1")
registry.async_create("mock 2")
await client.send_json(
{
"id": 1,
"area_id": area.id,
"name": "mock 2",
"type": "config/area_registry/update",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == "invalid_info"
assert msg["error"]["message"] == "Name is already in use"
assert len(registry.areas) == 2
| apache-2.0 | -2,389,402,687,643,168,300 | 25.72 | 76 | 0.606038 | false |
shenqicang/openmc | tests/test_tally_assumesep/results.py | 1 | 1251 | #!/usr/bin/env python
import sys
import numpy as np
# import statepoint
sys.path.append('../../src/utils')
import statepoint
# read in statepoint file
if len(sys.argv) > 1:
sp = statepoint.StatePoint(sys.argv[1])
else:
sp = statepoint.StatePoint('statepoint.10.binary')
sp.read_results()
# extract tally results and convert to vector
results1 = sp.tallies[0].results
shape1 = results1.shape
size1 = (np.product(shape1))
results1 = np.reshape(results1, size1)
results2 = sp.tallies[1].results
shape2 = results2.shape
size2 = (np.product(shape2))
results2 = np.reshape(results2, size2)
results3 = sp.tallies[2].results
shape3 = results3.shape
size3 = (np.product(shape3))
results3 = np.reshape(results3, size3)
# set up output string
outstr = ''
# write out k-combined
outstr += 'k-combined:\n'
outstr += "{0:12.6E} {1:12.6E}\n".format(sp.k_combined[0], sp.k_combined[1])
# write out tally results
outstr += 'tally 1:\n'
for item in results1:
outstr += "{0:12.6E}\n".format(item)
outstr += 'tally 2:\n'
for item in results2:
outstr += "{0:12.6E}\n".format(item)
outstr += 'tally 3:\n'
for item in results3:
outstr += "{0:12.6E}\n".format(item)
# write results to file
with open('results_test.dat','w') as fh:
fh.write(outstr)
| mit | -766,772,619,525,675,100 | 23.529412 | 76 | 0.693845 | false |
DataBassDroppers/291ProjectOne | Patient_Info_Update.py | 1 | 18073 | import cx_Oracle
import getpass #gets password without echoing
import random
import datetime
class Patient_Info_Update():
def __init__(self):
pass
def main(self, credentials):
self.con = cx_Oracle.connect(credentials[0] + '/' + \
credentials[1] + '@gwynne.cs.ualberta.ca:1521/CRS')
state = self.getInputs()
if state == 0:
return 1
self.executeStatement(state)
self.con.close()
return 1
def printOptions(self):
print()
print("[1] Enter new Patient")
print("[2] Edit Existing Patient")
print("[3] Return to main menu.")
def getInputs(self):
while 1:
self.name_update = False
self.address_update = False
self.birth_update = False
self.phone_update = False
self.printOptions()
ans = input("Enter a choice: ")
if ans == "1":
self.HCN = self.getUniqueHCN()
self.printSeparator()
self.name = self.getName()
go=True
self.printSeparator()
while go:
self.address,go = self.getAddress()
go=True
self.printSeparator()
while go:
self.birth,go = self.getBirthDate()
self.printSeparator()
self.phone = self.getPhone()
self.printSeparator()
print("Patient Name: " + self.name)
print("Patient Address: " + self.address)
print("Patient Birth Date: " + self.birth)
print("Patient Phone Number: " + self.phone)
print()
while 1:
conf = input("Confirm information (y/n): ")
if conf == "y":
print("Information confirmed.")
return 1
elif conf == "n":
print("Information not confirmed, returning to start.")
break
else:
print("Invalid choice, pick 'y' or 'n'")
elif ans == "2":
go=True
self.printSeparator()
while go:
self.patient,go = self.getPatient()
not_done = True
while not_done:
curs = self.con.cursor()
curs.execute("select * from patient where health_care_no=" + str(self.patient))
rows = curs.fetchall()
print()
for row in rows:
list1=[]
counter=0
for x in row:
if counter == 3:
if x is not None:
x=(x.strftime("%Y-%m-%d %H:%M:%S"))
x=x[:-9]
counter+=1
list1.append(x)
print("Current Information: " + str(tuple(list1)))
print("[1] Update patient name.")
print("[2] Update patient address.")
print("[3] Update patient birth date.")
print("[4] Update patient phone number.")
print("[5] Return to menu.")
check = input("Enter an option: ")
if check == "1":
self.printSeparator()
self.name = self.getName()
self.name_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "2":
go=True
self.printSeparator()
while go:
self.address,go = self.getAddress()
self.address_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "3":
go=True
self.printSeparator()
while go:
self.birth,go = self.getBirthDate()
self.birth_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "4":
self.printSeparator()
self.phone = self.getPhone()
self.phone_update = True
ask = input("Update another value? (y/n): ")
while 1:
if ask == "y":
break
elif ask == "n":
not_done = False
break
else:
print("Invalid input. ")
print()
elif check == "5":
break
else:
print("Invalid input.")
print()
self.printSeparator()
if self.name_update:
print("Patient Name: " + self.name)
if self.address_update:
print("Patient Address: " + self.address)
if self.birth_update:
print("Patient Birth Date: " + self.birth)
if self.phone_update:
print("Patient Phone Number: " + self.phone)
print()
while 1:
conf = input("Confirm updates (y/n): ")
if conf == "y":
print("Information confirmed.")
return 2
elif conf == "n":
print("Information not confirmed, returning to start.")
break
else:
print("Invalid choice, pick 'y' or 'n'")
elif ans == "3":
return 0
else:
print("Invalid choice.")
def input_check(input):
try:
check = eval(input)
if check not in [1,2,3,4,5]:
return 0
else:
return check
except:
return 0
def getPhone(self):
ans = True
while ans:
print()
phone = input("Input Patient Phone Number (10-digits): ")
if phone.isdigit() and len(phone) == 10:
reply = input("Confirm patient number :: " + phone + " :: (y/n): ")
if reply == "y":
ans = False
elif reply == "n":
print("Phone incorrect, returning to start.")
else:
print("Invalid input, returning to start.")
else:
print("Invalid input. Enter phone as a single number without spaces or dashes.")
print()
return phone
def getName(self):
ans = True
while ans:
print()
name = input("Input Patient Name: ")
reply = input("Confirm patient name :: " + name + " :: (y/n): ")
if reply == "y":
ans = False
elif reply == "n":
print("Name incorrect, enter again.")
else:
print("Invalid input, enter again.")
return name
def getAddress(self):
not_allowed = [chr(34), chr(39)]
ans = True
while ans:
print()
address = input("Enter Address: ")
reply = input("Confirm patient address :: " + address + " :: (y/n): ")
if reply == "y":
for each in address:
if each in not_allowed:
print("Apostrophe and Quotation characters are disallowed.")
return False, True
if len(address) > 200:
print("Address entry exceeds character limit of 200.")
return False, True
else:
return address, False
elif reply == "n":
print("Address incorrect, enter again.")
else:
print("Invalid input, enter again.")
def getBirthDate(self):
ans = True
while ans:
print()
string = input('Enter Birth Date "yyyy/mm/dd": ')
if len(string) != 10:
print("Invalid input.")
return False, True
else:
year = string[0:4]
month = string[5:7]
day = string[8:]
correctDate = None
if self.isNumber(year) and self.isNumber(month) and self.isNumber(day) and string[4] == "/" and string[7] == "/":
try:
newDate = datetime.datetime(int(year),int(month),int(day))
correctDate = True
except ValueError:
correctDate = False
if correctDate:
reply = input("Confirm patient birth date :: " + string + " :: (y/n): ")
if reply == "y":
return string,False
elif reply == "n":
print("Birth date incorrect, enter again.")
else:
print("Invalid input, enter again.")
else:
print("Invalid date.")
return False, True
def goodNumber(self,string,case):
if case == "D":
curs = self.con.cursor()
curs.execute("select * from doctor where employee_no like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "T":
curs = self.con.cursor()
curs.execute("select * from test_record where test_id like '"+string+"'")
rows = curs.fetchall()
if len(rows) ==0:
return False
else:
return True
else:
curs = self.con.cursor()
curs.execute("select * from patient where health_care_no like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
def isReal(self,string,case):
if case == "D":
curs = self.con.cursor()
curs.execute("select * from doctor d, patient p where d.health_care_no=p.health_care_no and p.name like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "T":
curs = self.con.cursor()
curs.execute("select * from test_type where test_name like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "L":
curs = self.con.cursor()
curs.execute("select * from medical_lab where lab_name like '"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
elif case == "R":
curs = self.con.cursor()
curs.execute("select * from test_record where test_id like '"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
else:
curs = self.con.cursor()
curs.execute("select * from patient where name like'"+string+"'")
rows = curs.fetchall()
if len(rows) == 0:
return False
else:
return True
def isNumber(self, string):
return string.isdigit()
# returns the patient_no on success
def getPatient(self):
curs = self.con.cursor()
curs.execute("select name,health_care_no from patient p")
rows = curs.fetchall()
for row in rows:
print(row)
string = input('Enter Patient name or number: ')
if self.isNumber(string):
if self.goodNumber(string,"P"):
return int(string),False
else:
print("Invalid health care number.")
print()
return False,True
else:
if self.isReal(string,"P"):
return self.getPatientNumber(string),False
else:
print(string,"is not a real patient, try again")
return False,True
def getPatientNumber(self,string):
curs = self.con.cursor()
curs.execute("select * from patient p where p.name like '"+string+"'")
rows = curs.fetchall()
tmp = []
if len(rows) > 1:
while 1:
print()
print("Health Care Number | Name | Address | Date of Birth | Phone number")
for row in rows:
print(row)
tmp.append(str(row[0]))
pick = input("Enter ID of correct patient: ")
if pick in tmp:
return pick
else:
print("Incorrect value, enter valid ID of correct patient.")
else:
return rows[0][0]
def printSeparator(self):
print("")
print("-----------------------")
print("")
def getUniqueHCN(self):
curs = self.con.cursor()
curs.execute("select health_care_no from patient")
rows = curs.fetchall()
while (True):
health_care_no = random.randint(0, 10**3)
if all(health_care_no != row[0] for row in rows):
return health_care_no
def executeStatement(self, state):
print("******EXECUTING STATEMENT******")
curs = self.con.cursor()
if state == 1:
try:
curs.execute("insert into patient values (" + str(self.HCN) + ", '" + str(self.name) + "', '" + str(self.address) + "', TO_DATE('" + str(self.birth) + "', 'YYYY-MM-DD'), '" + str(self.phone) + "')")
except:
self.printSeparator()
print("SQL Database Violation. Remember, Name and Address are a unique key.")
elif state == 2:
if self.name_update and self.address_update:
curs.execute("select name, address from patient")
rows = curs.fetchall()
for row in rows:
if row[0] == self.name and row[1] == self.address:
self.printSeparator()
print("SQL Database Violation. Name and Address are a unique key.")
self.printSeparator()
return 0
if self.name_update:
try:
curs.execute("update patient set name='" + str(self.name) + "' where health_care_no=" + str(self.patient))
except:
self.printSeparator()
print("SQL Database Violation. Remember, Name and Address are a unique key.")
self.printSeparator()
if self.address_update:
try:
curs.execute("update patient set address='" + str(self.address) + "' where health_care_no=" + str(self.patient))
except:
self.printSeparator()
print("SQL Database Violation. Remember, Name and Address are a unique key.")
self.printSeparator()
if self.birth_update:
curs.execute("update patient set birth_day=TO_DATE('" + str(self.birth) + "', 'YYYY-MM-DD') where health_care_no=" + str(self.patient))
if self.phone_update:
curs.execute("update patient set phone='" + str(self.phone) + "' where health_care_no=" + str(self.patient))
self.printSeparator()
self.con.commit()
| apache-2.0 | -2,980,646,853,998,649,300 | 35.73374 | 214 | 0.409893 | false |
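The class above assembles its SQL by concatenating user input into the statement text. As a point of comparison only, and not part of the original code, cx_Oracle also accepts named bind variables; a hypothetical rewrite of one query inside a method of the class could look like this:

curs = self.con.cursor()
curs.execute("select * from patient where health_care_no = :hcn", hcn=self.patient)
rows = curs.fetchall()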
EmreAtes/spack | lib/spack/spack/hooks/__init__.py | 1 | 2945 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""This package contains modules with hooks for various stages in the
Spack install process. You can add modules here and they'll be
executed by package at various times during the package lifecycle.
Each hook is just a function that takes a package as a parameter.
Hooks are not executed in any particular order.
Currently the following hooks are supported:
* pre_run()
* pre_install(spec)
* post_install(spec)
* pre_uninstall(spec)
* post_uninstall(spec)
This can be used to implement support for things like module
systems (e.g. modules, dotkit, etc.) or to add other custom
features.
"""
import imp
import spack
from llnl.util.filesystem import join_path
from llnl.util.lang import memoized, list_modules
@memoized
def all_hook_modules():
modules = []
for name in list_modules(spack.hooks_path):
mod_name = __name__ + '.' + name
path = join_path(spack.hooks_path, name) + ".py"
mod = imp.load_source(mod_name, path)
modules.append(mod)
return modules
class HookRunner(object):
def __init__(self, hook_name):
self.hook_name = hook_name
def __call__(self, *args, **kwargs):
for module in all_hook_modules():
if hasattr(module, self.hook_name):
hook = getattr(module, self.hook_name)
if hasattr(hook, '__call__'):
hook(*args, **kwargs)
#
# Define some functions that can be called to fire off hooks.
#
pre_run = HookRunner('pre_run')
pre_install = HookRunner('pre_install')
post_install = HookRunner('post_install')
pre_uninstall = HookRunner('pre_uninstall')
post_uninstall = HookRunner('post_uninstall')
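# Illustrative sketch (editor's addition, not shipped with Spack): a hook
# module placed in spack.hooks_path only needs to define functions named
# after the hook points listed in the module docstring. For example, a
# hypothetical file announce.py in this package could contain:
#
#     import llnl.util.tty as tty
#
#     def post_install(spec):
#         tty.msg("installed %s" % spec.name)
#
# all_hook_modules() would then pick it up automatically, and the
# post_install HookRunner defined above would call it after every install.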
| lgpl-2.1 | 7,265,135,582,644,949,000 | 33.647059 | 78 | 0.662479 | false |
yk5/beam | sdks/python/apache_beam/runners/pipeline_context.py | 3 | 3550 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility class for serializing pipelines via the runner API.
For internal use only; no backwards-compatibility guarantees.
"""
from apache_beam import pipeline
from apache_beam import pvalue
from apache_beam import coders
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.transforms import core
class _PipelineContextMap(object):
"""This is a bi-directional map between objects and ids.
Under the hood it encodes and decodes these objects into runner API
representations.
"""
def __init__(self, context, obj_type, proto_map=None):
self._pipeline_context = context
self._obj_type = obj_type
self._obj_to_id = {}
self._id_to_obj = {}
self._id_to_proto = proto_map if proto_map else {}
self._counter = 0
def _unique_ref(self, obj=None):
self._counter += 1
return "ref_%s_%s_%s" % (
self._obj_type.__name__, type(obj).__name__, self._counter)
def populate_map(self, proto_map):
for id, proto in self._id_to_proto.items():
proto_map[id].CopyFrom(proto)
def get_id(self, obj):
if obj not in self._obj_to_id:
id = self._unique_ref(obj)
self._id_to_obj[id] = obj
self._obj_to_id[obj] = id
self._id_to_proto[id] = obj.to_runner_api(self._pipeline_context)
return self._obj_to_id[obj]
def get_by_id(self, id):
if id not in self._id_to_obj:
self._id_to_obj[id] = self._obj_type.from_runner_api(
self._id_to_proto[id], self._pipeline_context)
return self._id_to_obj[id]
class PipelineContext(object):
"""For internal use only; no backwards-compatibility guarantees.
Used for accessing and constructing the referenced objects of a Pipeline.
"""
_COMPONENT_TYPES = {
'transforms': pipeline.AppliedPTransform,
'pcollections': pvalue.PCollection,
'coders': coders.Coder,
'windowing_strategies': core.Windowing,
# TODO: environment
}
def __init__(self, proto=None):
if isinstance(proto, beam_fn_api_pb2.ProcessBundleDescriptor):
proto = beam_runner_api_pb2.Components(
          coders=dict(proto.coders.items()),
windowing_strategies=dict(proto.windowing_strategies.items()),
environments=dict(proto.environments.items()))
for name, cls in self._COMPONENT_TYPES.items():
setattr(
self, name, _PipelineContextMap(
self, cls, getattr(proto, name, None)))
@staticmethod
def from_runner_api(proto):
return PipelineContext(proto)
def to_runner_api(self):
context_proto = beam_runner_api_pb2.Components()
for name in self._COMPONENT_TYPES:
getattr(self, name).populate_map(getattr(context_proto, name))
return context_proto
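# Illustrative sketch (editor's addition, not part of the original module):
# typical round trip through one of the component maps created above. The
# `coder` argument is assumed to be a coders.Coder instance that implements
# to_runner_api()/from_runner_api(), as required by _PipelineContextMap.
def _example_coder_round_trip(coder):
  context = PipelineContext()
  # get_id() serializes the coder into the context and returns a unique ref.
  coder_id = context.coders.get_id(coder)
  # get_by_id() rebuilds an equivalent coder from the stored proto.
  same_coder = context.coders.get_by_id(coder_id)
  return coder_id, same_coder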
| apache-2.0 | 4,571,554,364,851,259,400 | 33.466019 | 75 | 0.693803 | false |
rahimnathwani/django-countries | django_countries/data.py | 4 | 17825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a self-generating script that contains all of the iso3166-1 data.
To regenerate, a CSV file must be created that contains the latest data. Here's
how to do that:
1. Visit https://www.iso.org/obp
2. Click the "Country Codes" radio option and click the search button
3. Filter by "Officially assigned codes"
4. Change the results per page to 300
5. Copy the html table and paste into Libreoffice Calc / Excel
6. Delete the French name column
7. Save as a CSV file in django_countries/iso3166-1.csv
8. Run this script from the command line
"""
from __future__ import unicode_literals
import glob
import os
try:
from django.utils.translation import ugettext_lazy as _
except ImportError: # pragma: no cover
# Allows this module to be executed without Django installed.
_ = lambda x: x
COMMON_NAMES = {
"BN": _("Brunei"),
"BO": _("Bolivia"),
"GB": _("United Kingdom"),
"IR": _("Iran"),
"KP": _("North Korea"),
"KR": _("South Korea"),
"LA": _("Laos"),
"MD": _("Moldova"),
"MK": _("Macedonia"),
"RU": _("Russia"),
"SY": _("Syria"),
"TW": _("Taiwan"),
"TZ": _("Tanzania"),
"VE": _("Venezuela"),
"VN": _("Vietnam"),
}
# Nicely titled (and translatable) country names.
COUNTRIES = {
"AF": _("Afghanistan"),
"AX": _("Åland Islands"),
"AL": _("Albania"),
"DZ": _("Algeria"),
"AS": _("American Samoa"),
"AD": _("Andorra"),
"AO": _("Angola"),
"AI": _("Anguilla"),
"AQ": _("Antarctica"),
"AG": _("Antigua and Barbuda"),
"AR": _("Argentina"),
"AM": _("Armenia"),
"AW": _("Aruba"),
"AU": _("Australia"),
"AT": _("Austria"),
"AZ": _("Azerbaijan"),
"BS": _("Bahamas"),
"BH": _("Bahrain"),
"BD": _("Bangladesh"),
"BB": _("Barbados"),
"BY": _("Belarus"),
"BE": _("Belgium"),
"BZ": _("Belize"),
"BJ": _("Benin"),
"BM": _("Bermuda"),
"BT": _("Bhutan"),
"BO": _("Bolivia (Plurinational State of)"),
"BQ": _("Bonaire, Sint Eustatius and Saba"),
"BA": _("Bosnia and Herzegovina"),
"BW": _("Botswana"),
"BV": _("Bouvet Island"),
"BR": _("Brazil"),
"IO": _("British Indian Ocean Territory"),
"BN": _("Brunei Darussalam"),
"BG": _("Bulgaria"),
"BF": _("Burkina Faso"),
"BI": _("Burundi"),
"CV": _("Cabo Verde"),
"KH": _("Cambodia"),
"CM": _("Cameroon"),
"CA": _("Canada"),
"KY": _("Cayman Islands"),
"CF": _("Central African Republic"),
"TD": _("Chad"),
"CL": _("Chile"),
"CN": _("China"),
"CX": _("Christmas Island"),
"CC": _("Cocos (Keeling) Islands"),
"CO": _("Colombia"),
"KM": _("Comoros"),
"CD": _("Congo (the Democratic Republic of the)"),
"CG": _("Congo"),
"CK": _("Cook Islands"),
"CR": _("Costa Rica"),
"CI": _("Côte d'Ivoire"),
"HR": _("Croatia"),
"CU": _("Cuba"),
"CW": _("Curaçao"),
"CY": _("Cyprus"),
"CZ": _("Czech Republic"),
"DK": _("Denmark"),
"DJ": _("Djibouti"),
"DM": _("Dominica"),
"DO": _("Dominican Republic"),
"EC": _("Ecuador"),
"EG": _("Egypt"),
"SV": _("El Salvador"),
"GQ": _("Equatorial Guinea"),
"ER": _("Eritrea"),
"EE": _("Estonia"),
"ET": _("Ethiopia"),
"FK": _("Falkland Islands [Malvinas]"),
"FO": _("Faroe Islands"),
"FJ": _("Fiji"),
"FI": _("Finland"),
"FR": _("France"),
"GF": _("French Guiana"),
"PF": _("French Polynesia"),
"TF": _("French Southern Territories"),
"GA": _("Gabon"),
"GM": _("Gambia"),
"GE": _("Georgia"),
"DE": _("Germany"),
"GH": _("Ghana"),
"GI": _("Gibraltar"),
"GR": _("Greece"),
"GL": _("Greenland"),
"GD": _("Grenada"),
"GP": _("Guadeloupe"),
"GU": _("Guam"),
"GT": _("Guatemala"),
"GG": _("Guernsey"),
"GN": _("Guinea"),
"GW": _("Guinea-Bissau"),
"GY": _("Guyana"),
"HT": _("Haiti"),
"HM": _("Heard Island and McDonald Islands"),
"VA": _("Holy See"),
"HN": _("Honduras"),
"HK": _("Hong Kong"),
"HU": _("Hungary"),
"IS": _("Iceland"),
"IN": _("India"),
"ID": _("Indonesia"),
"IR": _("Iran (Islamic Republic of)"),
"IQ": _("Iraq"),
"IE": _("Ireland"),
"IM": _("Isle of Man"),
"IL": _("Israel"),
"IT": _("Italy"),
"JM": _("Jamaica"),
"JP": _("Japan"),
"JE": _("Jersey"),
"JO": _("Jordan"),
"KZ": _("Kazakhstan"),
"KE": _("Kenya"),
"KI": _("Kiribati"),
"KP": _("Korea (the Democratic People's Republic of)"),
"KR": _("Korea (the Republic of)"),
"KW": _("Kuwait"),
"KG": _("Kyrgyzstan"),
"LA": _("Lao People's Democratic Republic"),
"LV": _("Latvia"),
"LB": _("Lebanon"),
"LS": _("Lesotho"),
"LR": _("Liberia"),
"LY": _("Libya"),
"LI": _("Liechtenstein"),
"LT": _("Lithuania"),
"LU": _("Luxembourg"),
"MO": _("Macao"),
"MK": _("Macedonia (the former Yugoslav Republic of)"),
"MG": _("Madagascar"),
"MW": _("Malawi"),
"MY": _("Malaysia"),
"MV": _("Maldives"),
"ML": _("Mali"),
"MT": _("Malta"),
"MH": _("Marshall Islands"),
"MQ": _("Martinique"),
"MR": _("Mauritania"),
"MU": _("Mauritius"),
"YT": _("Mayotte"),
"MX": _("Mexico"),
"FM": _("Micronesia (Federated States of)"),
"MD": _("Moldova (the Republic of)"),
"MC": _("Monaco"),
"MN": _("Mongolia"),
"ME": _("Montenegro"),
"MS": _("Montserrat"),
"MA": _("Morocco"),
"MZ": _("Mozambique"),
"MM": _("Myanmar"),
"NA": _("Namibia"),
"NR": _("Nauru"),
"NP": _("Nepal"),
"NL": _("Netherlands"),
"NC": _("New Caledonia"),
"NZ": _("New Zealand"),
"NI": _("Nicaragua"),
"NE": _("Niger"),
"NG": _("Nigeria"),
"NU": _("Niue"),
"NF": _("Norfolk Island"),
"MP": _("Northern Mariana Islands"),
"NO": _("Norway"),
"OM": _("Oman"),
"PK": _("Pakistan"),
"PW": _("Palau"),
"PS": _("Palestine, State of"),
"PA": _("Panama"),
"PG": _("Papua New Guinea"),
"PY": _("Paraguay"),
"PE": _("Peru"),
"PH": _("Philippines"),
"PN": _("Pitcairn"),
"PL": _("Poland"),
"PT": _("Portugal"),
"PR": _("Puerto Rico"),
"QA": _("Qatar"),
"RE": _("Réunion"),
"RO": _("Romania"),
"RU": _("Russian Federation"),
"RW": _("Rwanda"),
"BL": _("Saint Barthélemy"),
"SH": _("Saint Helena, Ascension and Tristan da Cunha"),
"KN": _("Saint Kitts and Nevis"),
"LC": _("Saint Lucia"),
"MF": _("Saint Martin (French part)"),
"PM": _("Saint Pierre and Miquelon"),
"VC": _("Saint Vincent and the Grenadines"),
"WS": _("Samoa"),
"SM": _("San Marino"),
"ST": _("Sao Tome and Principe"),
"SA": _("Saudi Arabia"),
"SN": _("Senegal"),
"RS": _("Serbia"),
"SC": _("Seychelles"),
"SL": _("Sierra Leone"),
"SG": _("Singapore"),
"SX": _("Sint Maarten (Dutch part)"),
"SK": _("Slovakia"),
"SI": _("Slovenia"),
"SB": _("Solomon Islands"),
"SO": _("Somalia"),
"ZA": _("South Africa"),
"GS": _("South Georgia and the South Sandwich Islands"),
"SS": _("South Sudan"),
"ES": _("Spain"),
"LK": _("Sri Lanka"),
"SD": _("Sudan"),
"SR": _("Suriname"),
"SJ": _("Svalbard and Jan Mayen"),
"SZ": _("Swaziland"),
"SE": _("Sweden"),
"CH": _("Switzerland"),
"SY": _("Syrian Arab Republic"),
"TW": _("Taiwan (Province of China)"),
"TJ": _("Tajikistan"),
"TZ": _("Tanzania, United Republic of"),
"TH": _("Thailand"),
"TL": _("Timor-Leste"),
"TG": _("Togo"),
"TK": _("Tokelau"),
"TO": _("Tonga"),
"TT": _("Trinidad and Tobago"),
"TN": _("Tunisia"),
"TR": _("Turkey"),
"TM": _("Turkmenistan"),
"TC": _("Turks and Caicos Islands"),
"TV": _("Tuvalu"),
"UG": _("Uganda"),
"UA": _("Ukraine"),
"AE": _("United Arab Emirates"),
"GB": _("United Kingdom of Great Britain and Northern Ireland"),
"UM": _("United States Minor Outlying Islands"),
"US": _("United States of America"),
"UY": _("Uruguay"),
"UZ": _("Uzbekistan"),
"VU": _("Vanuatu"),
"VE": _("Venezuela (Bolivarian Republic of)"),
"VN": _("Viet Nam"),
"VG": _("Virgin Islands (British)"),
"VI": _("Virgin Islands (U.S.)"),
"WF": _("Wallis and Futuna"),
"EH": _("Western Sahara"),
"YE": _("Yemen"),
"ZM": _("Zambia"),
"ZW": _("Zimbabwe"),
}
ALT_CODES = {
"AF": ("AFG", 4),
"AX": ("ALA", 248),
"AL": ("ALB", 8),
"DZ": ("DZA", 12),
"AS": ("ASM", 16),
"AD": ("AND", 20),
"AO": ("AGO", 24),
"AI": ("AIA", 660),
"AQ": ("ATA", 10),
"AG": ("ATG", 28),
"AR": ("ARG", 32),
"AM": ("ARM", 51),
"AW": ("ABW", 533),
"AU": ("AUS", 36),
"AT": ("AUT", 40),
"AZ": ("AZE", 31),
"BS": ("BHS", 44),
"BH": ("BHR", 48),
"BD": ("BGD", 50),
"BB": ("BRB", 52),
"BY": ("BLR", 112),
"BE": ("BEL", 56),
"BZ": ("BLZ", 84),
"BJ": ("BEN", 204),
"BM": ("BMU", 60),
"BT": ("BTN", 64),
"BO": ("BOL", 68),
"BQ": ("BES", 535),
"BA": ("BIH", 70),
"BW": ("BWA", 72),
"BV": ("BVT", 74),
"BR": ("BRA", 76),
"IO": ("IOT", 86),
"BN": ("BRN", 96),
"BG": ("BGR", 100),
"BF": ("BFA", 854),
"BI": ("BDI", 108),
"CV": ("CPV", 132),
"KH": ("KHM", 116),
"CM": ("CMR", 120),
"CA": ("CAN", 124),
"KY": ("CYM", 136),
"CF": ("CAF", 140),
"TD": ("TCD", 148),
"CL": ("CHL", 152),
"CN": ("CHN", 156),
"CX": ("CXR", 162),
"CC": ("CCK", 166),
"CO": ("COL", 170),
"KM": ("COM", 174),
"CD": ("COD", 180),
"CG": ("COG", 178),
"CK": ("COK", 184),
"CR": ("CRI", 188),
"CI": ("CIV", 384),
"HR": ("HRV", 191),
"CU": ("CUB", 192),
"CW": ("CUW", 531),
"CY": ("CYP", 196),
"CZ": ("CZE", 203),
"DK": ("DNK", 208),
"DJ": ("DJI", 262),
"DM": ("DMA", 212),
"DO": ("DOM", 214),
"EC": ("ECU", 218),
"EG": ("EGY", 818),
"SV": ("SLV", 222),
"GQ": ("GNQ", 226),
"ER": ("ERI", 232),
"EE": ("EST", 233),
"ET": ("ETH", 231),
"FK": ("FLK", 238),
"FO": ("FRO", 234),
"FJ": ("FJI", 242),
"FI": ("FIN", 246),
"FR": ("FRA", 250),
"GF": ("GUF", 254),
"PF": ("PYF", 258),
"TF": ("ATF", 260),
"GA": ("GAB", 266),
"GM": ("GMB", 270),
"GE": ("GEO", 268),
"DE": ("DEU", 276),
"GH": ("GHA", 288),
"GI": ("GIB", 292),
"GR": ("GRC", 300),
"GL": ("GRL", 304),
"GD": ("GRD", 308),
"GP": ("GLP", 312),
"GU": ("GUM", 316),
"GT": ("GTM", 320),
"GG": ("GGY", 831),
"GN": ("GIN", 324),
"GW": ("GNB", 624),
"GY": ("GUY", 328),
"HT": ("HTI", 332),
"HM": ("HMD", 334),
"VA": ("VAT", 336),
"HN": ("HND", 340),
"HK": ("HKG", 344),
"HU": ("HUN", 348),
"IS": ("ISL", 352),
"IN": ("IND", 356),
"ID": ("IDN", 360),
"IR": ("IRN", 364),
"IQ": ("IRQ", 368),
"IE": ("IRL", 372),
"IM": ("IMN", 833),
"IL": ("ISR", 376),
"IT": ("ITA", 380),
"JM": ("JAM", 388),
"JP": ("JPN", 392),
"JE": ("JEY", 832),
"JO": ("JOR", 400),
"KZ": ("KAZ", 398),
"KE": ("KEN", 404),
"KI": ("KIR", 296),
"KP": ("PRK", 408),
"KR": ("KOR", 410),
"KW": ("KWT", 414),
"KG": ("KGZ", 417),
"LA": ("LAO", 418),
"LV": ("LVA", 428),
"LB": ("LBN", 422),
"LS": ("LSO", 426),
"LR": ("LBR", 430),
"LY": ("LBY", 434),
"LI": ("LIE", 438),
"LT": ("LTU", 440),
"LU": ("LUX", 442),
"MO": ("MAC", 446),
"MK": ("MKD", 807),
"MG": ("MDG", 450),
"MW": ("MWI", 454),
"MY": ("MYS", 458),
"MV": ("MDV", 462),
"ML": ("MLI", 466),
"MT": ("MLT", 470),
"MH": ("MHL", 584),
"MQ": ("MTQ", 474),
"MR": ("MRT", 478),
"MU": ("MUS", 480),
"YT": ("MYT", 175),
"MX": ("MEX", 484),
"FM": ("FSM", 583),
"MD": ("MDA", 498),
"MC": ("MCO", 492),
"MN": ("MNG", 496),
"ME": ("MNE", 499),
"MS": ("MSR", 500),
"MA": ("MAR", 504),
"MZ": ("MOZ", 508),
"MM": ("MMR", 104),
"NA": ("NAM", 516),
"NR": ("NRU", 520),
"NP": ("NPL", 524),
"NL": ("NLD", 528),
"NC": ("NCL", 540),
"NZ": ("NZL", 554),
"NI": ("NIC", 558),
"NE": ("NER", 562),
"NG": ("NGA", 566),
"NU": ("NIU", 570),
"NF": ("NFK", 574),
"MP": ("MNP", 580),
"NO": ("NOR", 578),
"OM": ("OMN", 512),
"PK": ("PAK", 586),
"PW": ("PLW", 585),
"PS": ("PSE", 275),
"PA": ("PAN", 591),
"PG": ("PNG", 598),
"PY": ("PRY", 600),
"PE": ("PER", 604),
"PH": ("PHL", 608),
"PN": ("PCN", 612),
"PL": ("POL", 616),
"PT": ("PRT", 620),
"PR": ("PRI", 630),
"QA": ("QAT", 634),
"RE": ("REU", 638),
"RO": ("ROU", 642),
"RU": ("RUS", 643),
"RW": ("RWA", 646),
"BL": ("BLM", 652),
"SH": ("SHN", 654),
"KN": ("KNA", 659),
"LC": ("LCA", 662),
"MF": ("MAF", 663),
"PM": ("SPM", 666),
"VC": ("VCT", 670),
"WS": ("WSM", 882),
"SM": ("SMR", 674),
"ST": ("STP", 678),
"SA": ("SAU", 682),
"SN": ("SEN", 686),
"RS": ("SRB", 688),
"SC": ("SYC", 690),
"SL": ("SLE", 694),
"SG": ("SGP", 702),
"SX": ("SXM", 534),
"SK": ("SVK", 703),
"SI": ("SVN", 705),
"SB": ("SLB", 90),
"SO": ("SOM", 706),
"ZA": ("ZAF", 710),
"GS": ("SGS", 239),
"SS": ("SSD", 728),
"ES": ("ESP", 724),
"LK": ("LKA", 144),
"SD": ("SDN", 729),
"SR": ("SUR", 740),
"SJ": ("SJM", 744),
"SZ": ("SWZ", 748),
"SE": ("SWE", 752),
"CH": ("CHE", 756),
"SY": ("SYR", 760),
"TW": ("TWN", 158),
"TJ": ("TJK", 762),
"TZ": ("TZA", 834),
"TH": ("THA", 764),
"TL": ("TLS", 626),
"TG": ("TGO", 768),
"TK": ("TKL", 772),
"TO": ("TON", 776),
"TT": ("TTO", 780),
"TN": ("TUN", 788),
"TR": ("TUR", 792),
"TM": ("TKM", 795),
"TC": ("TCA", 796),
"TV": ("TUV", 798),
"UG": ("UGA", 800),
"UA": ("UKR", 804),
"AE": ("ARE", 784),
"GB": ("GBR", 826),
"UM": ("UMI", 581),
"US": ("USA", 840),
"UY": ("URY", 858),
"UZ": ("UZB", 860),
"VU": ("VUT", 548),
"VE": ("VEN", 862),
"VN": ("VNM", 704),
"VG": ("VGB", 92),
"VI": ("VIR", 850),
"WF": ("WLF", 876),
"EH": ("ESH", 732),
"YE": ("YEM", 887),
"ZM": ("ZMB", 894),
"ZW": ("ZWE", 716),
}
def self_generate(
output_filename, filename='iso3166-1.csv'): # pragma: no cover
"""
The following code can be used for self-generation of this file.
It requires a UTF-8 CSV file containing the short ISO name and two letter
country code as the first two columns.
"""
import csv
import re
countries = []
alt_codes = []
with open(filename, 'rb') as csv_file:
for row in csv.reader(csv_file):
name = row[0].decode('utf-8').rstrip('*')
name = re.sub(r'\(the\)', '', name)
if name:
countries.append((name, row[1].decode('utf-8')))
alt_codes.append((
row[1].decode('utf-8'),
row[2].decode('utf-8'),
int(row[3]),
))
with open(__file__, 'r') as source_file:
contents = source_file.read()
# Write countries.
bits = re.match(
'(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)', contents, re.DOTALL).groups()
country_list = []
for name, code in countries:
name = name.replace('"', r'\"').strip()
country_list.append(
' "{code}": _("{name}"),'.format(name=name, code=code))
content = bits[0]
content += '\n'.join(country_list).encode('utf-8')
# Write alt codes.
alt_bits = re.match(
'(.*\nALT_CODES = \{\n)(.*)(\n\}.*)', bits[2], re.DOTALL).groups()
alt_list = []
for code, code3, codenum in alt_codes:
name = name.replace('"', r'\"').strip()
alt_list.append(
' "{code}": ("{code3}", {codenum}),'.format(
code=code, code3=code3, codenum=codenum))
content += alt_bits[0]
content += '\n'.join(alt_list).encode('utf-8')
content += alt_bits[2]
# Generate file.
with open(output_filename, 'wb') as output_file:
output_file.write(content)
return countries
def check_flags(verbosity=1):
files = {}
this_dir = os.path.dirname(__file__)
for path in glob.glob(os.path.join(this_dir, 'static', 'flags', '*.gif')):
files[os.path.basename(os.path.splitext(path)[0]).upper()] = path
flags_missing = set(COUNTRIES) - set(files)
if flags_missing: # pragma: no cover
print("The following country codes are missing a flag:")
for code in sorted(flags_missing):
print(" {0} ({1})".format(code, COUNTRIES[code]))
elif verbosity: # pragma: no cover
print("All country codes have flags. :)")
code_missing = set(files) - set(COUNTRIES)
# Special-case EU and __
for special_code in ('EU', '__'):
code_missing.discard(special_code)
if code_missing: # pragma: no cover
print("")
print("The following flags don't have a matching country code:")
for path in sorted(code_missing):
print(" {0}".format(path))
def check_common_names():
common_names_missing = set(COMMON_NAMES) - set(COUNTRIES)
if common_names_missing: # pragma: no cover
print("")
print(
"The following common names do not match an official country "
"code:")
for code in sorted(common_names_missing):
print(" {0}".format(code))
if __name__ == '__main__': # pragma: no cover
countries = self_generate(__file__)
print('Wrote {0} countries.'.format(len(countries)))
print("")
check_flags()
check_common_names()
| mit | 1,678,677,305,274,229,000 | 26.542504 | 79 | 0.441134 | false |
inercia/candelabra | candelabra/scheduler/rad_util.py | 1 | 26012 | # Copyright (c) 2007 RADLogic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provide various handy Python functions.
Running this script directly will execute the doctests.
Functions:
int2bin(i, n) -- Convert integer to binary string.
bin2int(bin_string) -- Convert binary string to integer.
reverse(input_string) -- Reverse a string.
transpose(matrix) -- Transpose a list of lists.
polygon_area(points_list) -- Calculate the area of an arbitrary polygon.
timestamp() -- Return string containing current time stamp.
pt2str(point) -- Return prettier string version of point tuple.
gcf(a, b) -- Return the greatest common factor of two numbers.
lcm(a, b) -- Return the least common multiple of two numbers.
permutations(input_list) -- Generate all permutations of a list of items.
reduce_fraction(fraction) -- Reduce fraction (num, denom) to simplest form.
quantile(l, p) -- Return p quantile of list l. E.g. p=0.25 for q1.
trim(l) -- Discard values in list more than 1.5*IQR outside IQR.
nice_units(value) -- Return value converted to human readable units.
uniquify(seq) -- Return sequence with duplicate items in sequence seq removed.
reverse_dict(d) -- Return the dictionary with the items as keys and vice-versa.
lsb(x, n) -- Return the n least significant bits of x.
gray_encode(i) -- Gray encode the given integer.
random_vec(bits, max_value=None) -- Return a random binary vector.
binary_range(bits) -- Return list of all possible binary numbers width=bits.
float_range([start], stop, [step]) -- Return range of floats.
find_common_fixes(s1, s2) -- Find common (prefix, suffix) of two strings.
is_rotated(seq1, seq2) -- Return true if the list is a rotation of other list.
getmodule(obj) -- Return the module that contains the object definition of obj.
(use inspect.getmodule instead, though)
get_args(argv) -- Store command-line args in a dictionary.
This module requires Python >= 2.2
"""
__author__ = 'Tim Wegener <[email protected]>'
__date__ = '$Date: 2007/03/27 03:15:06 $'
__version__ = '$Revision: 0.45 $'
__credits__ = """
David Chandler, for polygon area algorithm.
(http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf)
"""
import re
import sys
import time
import random
try:
True, False
except NameError:
True, False = (1==1, 0==1)
def int2bin(i, n):
"""Convert decimal integer i to n-bit binary number (string).
>>> int2bin(0, 8)
'00000000'
>>> int2bin(123, 8)
'01111011'
>>> int2bin(123L, 8)
'01111011'
>>> int2bin(15, 2)
Traceback (most recent call last):
ValueError: Value too large for given number of bits.
"""
hex2bin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011',
'4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
# Convert to hex then map each hex digit to binary equivalent.
result = ''.join([hex2bin[x] for x in hex(i).lower().replace('l','')[2:]])
# Shrink result to appropriate length.
# Raise an error if the value is changed by the truncation.
if '1' in result[:-n]:
raise ValueError("Value too large for given number of bits.")
result = result[-n:]
# Zero-pad if length longer than mapped result.
result = '0'*(n-len(result)) + result
return result
def bin2int(bin_string):
"""Convert binary number string to decimal integer.
Note: Python > v2 has int(bin_string, 2)
>>> bin2int('1111')
15
>>> bin2int('0101')
5
"""
## result = 0
## bin_list = list(bin_string)
## if len(filter(lambda x: x in ('1','0'), bin_list)) < len(bin_list):
## raise Exception ("bin2int: Error - not a binary number: %s"
## % bin_string)
## bit_list = map(int, bin_list)
## bit_list.reverse() # Make most significant bit have highest index.
## for bit_place in range(len(bit_list)):
## result = result + ((2**bit_place) * bit_list[bit_place])
## return result
return int(bin_string, 2)
def reverse(input_string):
"""Reverse a string. Useful for strings of binary numbers.
>>> reverse('abc')
'cba'
"""
str_list = list(input_string)
str_list.reverse()
return ''.join(str_list)
def transpose(matrix):
"""Transpose a list of lists.
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']])
[['a', 'd'], ['b', 'e'], ['c', 'f']]
>>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
[['a', 'd', 'g'], ['b', 'e', 'h']]
"""
result = zip(*matrix)
# Convert list of tuples to list of lists.
# map is faster than a list comprehension since it is being used with
# a built-in function as an argument.
result = map(list, result)
return result
def polygon_area(points_list, precision=100):
"""Calculate area of an arbitrary polygon using an algorithm from the web.
Return the area of the polygon as a positive float.
Arguments:
points_list -- list of point tuples [(x0, y0), (x1, y1), (x2, y2), ...]
(Unclosed polygons will be closed automatically.
precision -- Internal arithmetic precision (integer arithmetic).
>>> polygon_area([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 0), (0, 0)])
3.0
Credits:
Area of a General Polygon by David Chandler
http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf
"""
# Scale up co-ordinates and convert them to integers.
for i in range(len(points_list)):
points_list[i] = (int(points_list[i][0] * precision),
int(points_list[i][1] * precision))
# Close polygon if not closed.
if points_list[-1] != points_list[0]:
points_list.append(points_list[0])
# Calculate area.
area = 0
for i in range(len(points_list)-1):
(x_i, y_i) = points_list[i]
(x_i_plus_1, y_i_plus_1) = points_list[i+1]
area = area + (x_i_plus_1 * y_i) - (y_i_plus_1 * x_i)
area = abs(area / 2)
# Unscale area.
area = float(area)/(precision**2)
return area
def timestamp():
"""Return string containing current time stamp.
Note: In Python 2 onwards can use time.asctime() with no arguments.
"""
return time.asctime()
def pt2str(point):
"""Return prettier string version of point tuple.
>>> pt2str((1.8, 1.9))
'(1.8, 1.9)'
"""
return "(%s, %s)" % (str(point[0]), str(point[1]))
def gcf(a, b, epsilon=1e-16):
"""Return the greatest common factor of a and b, using Euclidean algorithm.
Arguments:
a, b -- two numbers
If both numbers are integers return an integer result,
otherwise return a float result.
epsilon -- floats less than this magnitude are considered to be zero
(default: 1e-16)
Examples:
>>> gcf(12, 34)
2
>>> gcf(13.5, 4)
0.5
>>> gcf(-2, 4)
2
>>> gcf(5, 0)
5
By (a convenient) definition:
>>> gcf(0, 0)
0
"""
result = max(a, b)
remainder = min(a, b)
while remainder and abs(remainder) > epsilon:
new_remainder = result % remainder
result = remainder
remainder = new_remainder
return abs(result)
def lcm(a, b, precision=None):
"""Return the least common multiple of a and b, using the gcf function.
Arguments:
a, b -- two numbers. If both are integers return an integer result,
otherwise a return a float result.
precision -- scaling factor if a and/or b are floats.
>>> lcm(21, 6)
42
>>> lcm(2.5, 3.5)
17.5
>>> str(lcm(1.5e-8, 2.5e-8, precision=1e9))
'7.5e-08'
By (an arbitary) definition:
>>> lcm(0, 0)
0
"""
# Note: Dummy precision argument is for backwards compatibility.
# Do the division first.
# (See http://en.wikipedia.org/wiki/Least_common_multiple )
denom = gcf(a, b)
if denom == 0:
result = 0
else:
result = a * (b / denom)
return result
def permutations(input_list):
"""Return a list containing all permutations of the input list.
Note: This is a recursive function.
>>> perms = permutations(['a', 'b', 'c'])
>>> perms.sort()
>>> for perm in perms:
... print perm
['a', 'b', 'c']
['a', 'c', 'b']
['b', 'a', 'c']
['b', 'c', 'a']
['c', 'a', 'b']
['c', 'b', 'a']
"""
out_lists = []
if len(input_list) > 1:
# Extract first item in list.
item = input_list[0]
# Find all permutations of remainder of list. (Recursive call.)
sub_lists = permutations(input_list[1:])
# For every permutation of the sub list...
for sub_list in sub_lists:
# Insert the extracted first item at every position of the list.
for i in range(len(input_list)):
new_list = sub_list[:]
new_list.insert(i, item)
out_lists.append(new_list)
else:
# Termination condition: only one item in input list.
out_lists = [input_list]
return out_lists
def reduce_fraction(fraction):
"""Reduce fraction tuple to simplest form. fraction=(num, denom)
>>> reduce_fraction((14, 7))
(2, 1)
>>> reduce_fraction((-2, 4))
(-1, 2)
>>> reduce_fraction((0, 4))
(0, 1)
>>> reduce_fraction((4, 0))
(1, 0)
"""
(numerator, denominator) = fraction
common_factor = abs(gcf(numerator, denominator))
result = (numerator/common_factor, denominator/common_factor)
return result
def quantile(l, p):
"""Return p quantile of list l. E.g. p=0.25 for q1.
See:
http://rweb.stat.umn.edu/R/library/base/html/quantile.html
"""
l_sort = l[:]
l_sort.sort()
n = len(l)
r = 1 + ((n - 1) * p)
i = int(r)
f = r - i
if i < n:
result = (1-f)*l_sort[i-1] + f*l_sort[i]
else:
result = l_sort[i-1]
return result
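# Illustrative usage (editor's addition, not part of the original module):
# quartiles of a small list, matching the R quantile behaviour referenced in
# the docstring above.
def _quantile_example():
    """Return (q1, median, q3) of [1, 2, 3, 4].
    >>> _quantile_example()
    (1.75, 2.5, 3.25)
    """
    l = [1, 2, 3, 4]
    return quantile(l, 0.25), quantile(l, 0.5), quantile(l, 0.75)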
def trim(l):
"""Discard values in list more than 1.5*IQR outside IQR.
(IQR is inter-quartile-range)
This function uses rad_util.quantile
1.5*IQR -- mild outlier
3*IQR -- extreme outlier
See:
http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm
"""
l_sort = l[:]
l_sort.sort()
# Calculate medianscore (based on stats.py lmedianscore by Gary Strangman)
if len(l_sort) % 2 == 0:
# If even number of scores, average middle 2.
index = int(len(l_sort) / 2) # Integer division correct
median = float(l_sort[index] + l_sort[index-1]) / 2
else:
# int divsion gives mid value when count from 0
index = int(len(l_sort) / 2)
median = l_sort[index]
# Calculate IQR.
q1 = quantile(l_sort, 0.25)
q3 = quantile(l_sort, 0.75)
iqr = q3 - q1
iqr_extra = iqr * 1.5
def in_interval(x, i=iqr_extra, q1=q1, q3=q3):
return (x >= q1-i and x <= q3+i)
l_trimmed = [x for x in l_sort if in_interval(x)]
return l_trimmed
def nice_units(value, dp=0, sigfigs=None, suffix='', space=' ',
use_extra_prefixes=False, use_full_name=False, mode='si'):
"""Return value converted to human readable units eg milli, micro, etc.
Arguments:
value -- number in base units
dp -- number of decimal places to display (rounded)
sigfigs -- number of significant figures to display (rounded)
This overrides dp if set.
suffix -- optional unit suffix to append to unit multiplier
space -- seperator between value and unit multiplier (default: ' ')
use_extra_prefixes -- use hecto, deka, deci and centi as well if set.
(default: False)
use_full_name -- use full name for multiplier symbol,
e.g. milli instead of m
(default: False)
mode -- 'si' for SI prefixes, 'bin' for binary multipliers (1024, etc.)
(Default: 'si')
SI prefixes from:
http://physics.nist.gov/cuu/Units/prefixes.html
(Greek mu changed to u.)
Binary prefixes based on:
http://physics.nist.gov/cuu/Units/binary.html
>>> nice_units(2e-11)
'20 p'
>>> nice_units(2e-11, space='')
'20p'
"""
si_prefixes = {1e24: ('Y', 'yotta'),
1e21: ('Z', 'zetta'),
1e18: ('E', 'exa'),
1e15: ('P', 'peta'),
1e12: ('T', 'tera'),
1e9: ('G', 'giga'),
1e6: ('M', 'mega'),
1e3: ('k', 'kilo'),
1e-3: ('m', 'milli'),
1e-6: ('u', 'micro'),
1e-9: ('n', 'nano'),
1e-12: ('p', 'pico'),
1e-15: ('f', 'femto'),
1e-18: ('a', 'atto'),
1e-21: ('z', 'zepto'),
1e-24: ('y', 'yocto')
}
if use_extra_prefixes:
si_prefixes.update({1e2: ('h', 'hecto'),
1e1: ('da', 'deka'),
1e-1: ('d', 'deci'),
1e-2: ('c', 'centi')
})
bin_prefixes = {2**10: ('K', 'kilo'),
2**20: ('M', 'mega'),
                    2**30: ('G', 'giga'),
2**40: ('T', 'tera'),
2**50: ('P', 'peta'),
2**60: ('E', 'exa')
}
if mode == 'bin':
prefixes = bin_prefixes
else:
prefixes = si_prefixes
prefixes[1] = ('', '') # Unity.
# Determine appropriate multiplier.
multipliers = prefixes.keys()
multipliers.sort()
mult = None
for i in range(len(multipliers) - 1):
lower_mult = multipliers[i]
upper_mult = multipliers[i+1]
if lower_mult <= value < upper_mult:
mult_i = i
break
if mult is None:
if value < multipliers[0]:
mult_i = 0
elif value >= multipliers[-1]:
mult_i = len(multipliers) - 1
mult = multipliers[mult_i]
# Convert value for this multiplier.
new_value = value / mult
# Deal with special case due to rounding.
if sigfigs is None:
if mult_i < (len(multipliers) - 1) and \
round(new_value, dp) == \
round((multipliers[mult_i+1] / mult), dp):
mult = multipliers[mult_i + 1]
new_value = value / mult
# Concatenate multiplier symbol.
if use_full_name:
label_type = 1
else:
label_type = 0
# Round and truncate to appropriate precision.
if sigfigs is None:
str_value = eval('"%.'+str(dp)+'f" % new_value', locals(), {})
else:
str_value = eval('"%.'+str(sigfigs)+'g" % new_value', locals(), {})
return str_value + space + prefixes[mult][label_type] + suffix
def uniquify(seq, preserve_order=False):
"""Return sequence with duplicate items in sequence seq removed.
The code is based on usenet post by Tim Peters.
This code is O(N) if the sequence items are hashable, O(N**2) if not.
Peter Bengtsson has a blog post with an empirical comparison of other
approaches:
http://www.peterbe.com/plog/uniqifiers-benchmark
If order is not important and the sequence items are hashable then
list(set(seq)) is readable and efficient.
If order is important and the sequence items are hashable generator
expressions can be used (in py >= 2.4) (useful for large sequences):
seen = set()
do_something(x for x in seq if x not in seen or seen.add(x))
Arguments:
seq -- sequence
preserve_order -- if not set the order will be arbitrary
Using this option will incur a speed penalty.
(default: False)
Example showing order preservation:
>>> uniquify(['a', 'aa', 'b', 'b', 'ccc', 'ccc', 'd'], preserve_order=True)
['a', 'aa', 'b', 'ccc', 'd']
Example using a sequence of un-hashable items:
>>> uniquify([['z'], ['x'], ['y'], ['z']], preserve_order=True)
[['z'], ['x'], ['y']]
The sorted output or the non-order-preserving approach should equal
that of the sorted order-preserving approach output:
>>> unordered = uniquify([3, 3, 1, 2], preserve_order=False)
>>> unordered.sort()
>>> ordered = uniquify([3, 3, 1, 2], preserve_order=True)
>>> ordered.sort()
>>> ordered
[1, 2, 3]
>>> int(ordered == unordered)
1
"""
try:
# Attempt fast algorithm.
d = {}
if preserve_order:
# This is based on Dave Kirby's method (f8) noted in the post:
# http://www.peterbe.com/plog/uniqifiers-benchmark
return [x for x in seq if (x not in d) and not d.__setitem__(x, 0)]
else:
for x in seq:
d[x] = 0
return d.keys()
except TypeError:
# Have an unhashable object, so use slow algorithm.
result = []
app = result.append
for x in seq:
if x not in result:
app(x)
return result
# Alias to noun form for backward compatibility.
unique = uniquify
def reverse_dict(d):
"""Reverse a dictionary so the items become the keys and vice-versa.
Note: The results will be arbitrary if the items are not unique.
>>> d = reverse_dict({'a': 1, 'b': 2})
>>> d_items = d.items()
>>> d_items.sort()
>>> d_items
[(1, 'a'), (2, 'b')]
"""
result = {}
for key, value in d.items():
result[value] = key
return result
def lsb(x, n):
"""Return the n least significant bits of x.
>>> lsb(13, 3)
5
"""
return x & ((2 ** n) - 1)
def gray_encode(i):
"""Gray encode the given integer."""
return i ^ (i >> 1)
def random_vec(bits, max_value=None):
"""Generate a random binary vector of length bits and given max value."""
vector = ""
for _ in range(int(bits / 10) + 1):
i = int((2**10) * random.random())
vector += int2bin(i, 10)
if max_value and (max_value < 2 ** bits - 1):
vector = int2bin((int(vector, 2) / (2 ** bits - 1)) * max_value, bits)
return vector[0:bits]
def binary_range(bits):
"""Return a list of all possible binary numbers in order with width=bits.
It would be nice to extend it to match the
functionality of python's range() built-in function.
"""
l = []
v = ['0'] * bits
toggle = [1] + [0] * bits
while toggle[bits] != 1:
v_copy = v[:]
v_copy.reverse()
l.append(''.join(v_copy))
toggle = [1] + [0]*bits
i = 0
while i < bits and toggle[i] == 1:
if toggle[i]:
if v[i] == '0':
v[i] = '1'
toggle[i+1] = 0
else:
v[i] = '0'
toggle[i+1] = 1
i += 1
return l
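# Illustrative usage (editor's addition, not part of the original module):
# all two-bit values in ascending order.
def _binary_range_example():
    """
    >>> _binary_range_example()
    ['00', '01', '10', '11']
    """
    return binary_range(2)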
def float_range(start, stop=None, step=None):
"""Return a list containing an arithmetic progression of floats.
Return a list of floats between 0.0 (or start) and stop with an
increment of step.
This is in functionality to python's range() built-in function
but can accept float increments.
As with range(), stop is omitted from the list.
"""
if stop is None:
stop = float(start)
start = 0.0
if step is None:
step = 1.0
cur = float(start)
l = []
while cur < stop:
l.append(cur)
cur += step
return l
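# Illustrative usage (editor's addition, not part of the original module):
# like range(), the stop value itself is excluded. The step used here is an
# exact binary float, so no accumulation error shows up in the result.
def _float_range_example():
    """
    >>> _float_range_example()
    [0.0, 0.25, 0.5, 0.75]
    """
    return float_range(0, 1, 0.25)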
def find_common_fixes(s1, s2):
"""Find common (prefix, suffix) of two strings.
>>> find_common_fixes('abc', 'def')
('', '')
>>> find_common_fixes('abcelephantdef', 'abccowdef')
('abc', 'def')
>>> find_common_fixes('abcelephantdef', 'abccow')
('abc', '')
>>> find_common_fixes('elephantdef', 'abccowdef')
('', 'def')
"""
prefix = []
suffix = []
i = 0
common_len = min(len(s1), len(s2))
while i < common_len:
if s1[i] != s2[i]:
break
prefix.append(s1[i])
i += 1
i = 1
while i < (common_len + 1):
if s1[-i] != s2[-i]:
break
suffix.append(s1[-i])
i += 1
suffix.reverse()
prefix = ''.join(prefix)
suffix = ''.join(suffix)
return (prefix, suffix)
def is_rotated(seq1, seq2):
"""Return true if the first sequence is a rotation of the second sequence.
>>> seq1 = ['A', 'B', 'C', 'D']
>>> seq2 = ['C', 'D', 'A', 'B']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['C', 'D', 'B', 'A']
>>> int(is_rotated(seq1, seq2))
0
>>> seq1 = ['A', 'B', 'C', 'A']
>>> seq2 = ['A', 'A', 'B', 'C']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'B', 'C', 'A']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'A', 'C', 'B']
>>> int(is_rotated(seq1, seq2))
0
"""
# Do a sanity check.
if len(seq1) != len(seq2):
return False
# Look for occurrences of second sequence head item in first sequence.
start_indexes = []
head_item = seq2[0]
for index1 in range(len(seq1)):
if seq1[index1] == head_item:
start_indexes.append(index1)
# Check that wrapped sequence matches.
double_seq1 = seq1 + seq1
for index1 in start_indexes:
if double_seq1[index1:index1+len(seq1)] == seq2:
return True
return False
def getmodule(obj):
"""Return the module that contains the object definition of obj.
Note: Use inspect.getmodule instead.
Arguments:
obj -- python obj, generally a class or a function
Examples:
A function:
>>> module = getmodule(random.choice)
>>> module.__name__
'random'
>>> module is random
1
A class:
>>> module = getmodule(random.Random)
>>> module.__name__
'random'
>>> module is random
1
A class inheriting from a class in another module:
(note: The inheriting class must define at least one function.)
>>> class MyRandom(random.Random):
... def play(self):
... pass
>>> module = getmodule(MyRandom)
>>> if __name__ == '__main__':
... name = 'rad_util'
... else:
... name = module.__name__
>>> name
'rad_util'
>>> module is sys.modules[__name__]
1
Discussion:
This approach is slightly hackish, and won't work in various situations.
However, this was the approach recommended by GvR, so it's as good as
you'll get.
See GvR's post in this thread:
http://groups.google.com.au/group/comp.lang.python/browse_thread/thread/966a7bdee07e3b34/c3cab3f41ea84236?lnk=st&q=python+determine+class+module&rnum=4&hl=en#c3cab3f41ea84236
"""
if hasattr(obj, 'func_globals'):
func = obj
else:
# Handle classes.
func = None
for item in obj.__dict__.values():
if hasattr(item, 'func_globals'):
func = item
break
if func is None:
raise ValueError("No functions attached to object: %r" % obj)
module_name = func.func_globals['__name__']
# Get module.
module = sys.modules[module_name]
return module
def round_grid(value, grid, mode=0):
"""Round off the given value to the given grid size.
Arguments:
value -- value to be roudne
grid -- result must be a multiple of this
mode -- 0 nearest, 1 up, -1 down
Examples:
>>> round_grid(7.5, 5)
10
>>> round_grid(7.5, 5, mode=-1)
5
>>> round_grid(7.3, 5, mode=1)
10
>>> round_grid(7.3, 5.0, mode=1)
10.0
"""
    off_grid = value % grid
    # Default to no rounding adjustment so that values already on the grid
    # are returned unchanged in every mode (avoids an unbound add_one).
    add_one = 0
    if mode == 0:
        add_one = int(off_grid >= (grid / 2.0))
    elif mode == 1 and off_grid:
        add_one = 1
    elif mode == -1 and off_grid:
        add_one = 0
result = ((int(value / grid) + add_one) * grid)
return result
def get_args(argv):
"""Store command-line args in a dictionary.
-, -- prefixes are removed
Items not prefixed with - or -- are stored as a list, indexed by 'args'
For options that take a value use --option=value
Consider using optparse or getopt (in Python standard library) instead.
"""
d = {}
args = []
for arg in argv:
if arg.startswith('-'):
parts = re.sub(r'^-+', '', arg).split('=')
if len(parts) == 2:
d[parts[0]] = parts[1]
else:
d[parts[0]] = None
else:
args.append(arg)
d['args'] = args
return d
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules['__main__'])
| bsd-2-clause | 977,809,573,443,822,300 | 27.647577 | 178 | 0.558012 | false |
derekstavis/bluntly | vendor/github.com/youtube/vitess/py/vttest/run_local_database.py | 1 | 7646 | #!/usr/bin/env python
r"""Command-line tool for starting a local Vitess database for testing.
USAGE:
$ run_local_database --port 12345 \
--proto_topo <vttest proto as string> \
--schema_dir /path/to/schema/dir
It will run the tool, logging to stderr. On stdout, a small json structure
can be waited on and then parsed by the caller to figure out how to reach
the vtgate process.
As an alternative to using proto_topo, a local instance can be started by using
additional flags, such as:
$ run_local_database --port 12345 \
--schema_dir /path/to/schema/dir \
--cells cell1,cell2 --keyspaces ks1,ks2 \
--num_shards 1,2
This will create an instance with two keyspaces in two cells, one with a single
shard and another with two shards.
Once done with the test, send an empty line to this process for it to clean-up,
and then just wait for it to exit.
"""
import json
import logging
import optparse
import os
import sys
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
from google.protobuf import text_format
from vttest import environment
from vttest import local_database
from vttest import mysql_flavor
from vttest import init_data_options
from vttest import sharding_utils
from vtproto import vttest_pb2
def main(cmdline_options):
topology = vttest_pb2.VTTestTopology()
if cmdline_options.proto_topo:
# Text-encoded proto topology object, just parse it.
topology = text_format.Parse(cmdline_options.proto_topo, topology)
if not topology.cells:
topology.cells.append('test')
else:
cells = []
keyspaces = []
shard_counts = []
if cmdline_options.cells:
cells = cmdline_options.cells.split(',')
if cmdline_options.keyspaces:
keyspaces = cmdline_options.keyspaces.split(',')
if cmdline_options.num_shards:
shard_counts = [int(x) for x in cmdline_options.num_shards.split(',')]
for cell in cells:
topology.cells.append(cell)
for keyspace, num_shards in zip(keyspaces, shard_counts):
ks = topology.keyspaces.add(name=keyspace)
for shard in sharding_utils.get_shard_names(num_shards):
ks.shards.add(name=shard)
ks.replica_count = cmdline_options.replica_count
ks.rdonly_count = cmdline_options.rdonly_count
environment.base_port = cmdline_options.port
init_data_opts = None
if cmdline_options.initialize_with_random_data:
init_data_opts = init_data_options.InitDataOptions()
init_data_opts.rng_seed = cmdline_options.rng_seed
init_data_opts.min_table_shard_size = cmdline_options.min_table_shard_size
init_data_opts.max_table_shard_size = cmdline_options.max_table_shard_size
init_data_opts.null_probability = cmdline_options.null_probability
extra_my_cnf = os.path.join(os.environ['VTTOP'], 'config/mycnf/vtcombo.cnf')
if cmdline_options.extra_my_cnf:
extra_my_cnf += ':' + cmdline_options.extra_my_cnf
with local_database.LocalDatabase(
topology,
cmdline_options.schema_dir,
cmdline_options.mysql_only,
init_data_opts,
web_dir=cmdline_options.web_dir,
web_dir2=cmdline_options.web_dir2,
default_schema_dir=cmdline_options.default_schema_dir,
extra_my_cnf=extra_my_cnf) as local_db:
print json.dumps(local_db.config())
sys.stdout.flush()
try:
raw_input()
except EOFError:
sys.stderr.write(
'WARNING: %s: No empty line was received on stdin.'
' Instead, stdin was closed and the cluster will be shut down now.'
' Make sure to send the empty line instead to proactively shutdown'
' the local cluster. For example, did you forget the shutdown in'
' your test\'s tearDown()?\n' % os.path.basename(__file__))
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option(
'-p', '--port', type='int',
help='Port to use for vtcombo. If this is 0, a random port '
'will be chosen.')
parser.add_option(
'-o', '--proto_topo',
help='Define the fake cluster topology as a compact text format encoded'
' vttest proto. See vttest.proto for more information.')
parser.add_option(
'-s', '--schema_dir',
help='Directory for initial schema files. Within this dir,'
' there should be a subdir for each keyspace. Within'
' each keyspace dir, each file is executed as SQL'
' after the database is created on each shard.'
' If the directory contains a vschema.json file, it'
' will be used as the vschema for the V3 API.')
parser.add_option(
'-e', '--default_schema_dir',
help='Default directory for initial schema files. If no schema is found'
' in schema_dir, default to this location.')
parser.add_option(
'-m', '--mysql_only', action='store_true',
help='If this flag is set only mysql is initialized.'
' The rest of the vitess components are not started.'
' Also, the output specifies the mysql unix socket'
' instead of the vtgate port.')
parser.add_option(
'-r', '--initialize_with_random_data', action='store_true',
help='If this flag is each table-shard will be initialized'
' with random data. See also the "rng_seed" and "min_shard_size"'
' and "max_shard_size" flags.')
parser.add_option(
'-d', '--rng_seed', type='int', default=123,
help='The random number generator seed to use when initializing'
' with random data (see also --initialize_with_random_data).'
' Multiple runs with the same seed will result with the same'
' initial data.')
parser.add_option(
'-x', '--min_table_shard_size', type='int', default=1000,
help='The minimum number of initial rows in a table shard. Ignored if'
'--initialize_with_random_data is false. The actual number is chosen'
' randomly.')
parser.add_option(
'-y', '--max_table_shard_size', type='int', default=10000,
help='The maximum number of initial rows in a table shard. Ignored if'
'--initialize_with_random_data is false. The actual number is chosen'
' randomly')
parser.add_option(
'-n', '--null_probability', type='float', default=0.1,
help='The probability to initialize a field with "NULL" '
' if --initialize_with_random_data is true. Only applies to fields'
' that can contain NULL values.')
parser.add_option(
'-w', '--web_dir',
help='location of the vtctld web server files.')
parser.add_option(
'--web_dir2',
help='location of the vtctld2 web server files.')
parser.add_option(
'-f', '--extra_my_cnf',
help='extra files to add to the config, separated by ":"')
parser.add_option(
'-v', '--verbose', action='store_true',
help='Display extra error messages.')
parser.add_option('-c', '--cells', default='test',
help='Comma separated list of cells')
parser.add_option('-k', '--keyspaces', default='test_keyspace',
help='Comma separated list of keyspaces')
parser.add_option('--num_shards', default='2',
help='Comma separated shard count (one per keyspace)')
parser.add_option('--replica_count', type='int', default=2,
help='Replica tablets per shard (includes master)')
parser.add_option('--rdonly_count', type='int', default=1,
help='Rdonly tablets per shard')
(options, args) = parser.parse_args()
if options.verbose:
logging.getLogger().setLevel(logging.DEBUG)
# This will set the flavor based on the MYSQL_FLAVOR env var,
# or default to MariaDB.
mysql_flavor.set_mysql_flavor(None)
main(options)
| mit | 3,395,204,098,319,261,700 | 37.812183 | 79 | 0.66937 | false |
kiwicopple/MyMDb | venv/Lib/site-packages/sphinx/util/smartypants.py | 35 | 11142 | r"""
This is based on SmartyPants.py by `Chad Miller`_ <[email protected]>,
version 1.5_1.6.
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _Chad Miller: http://web.chad.org/
"""
import re
def sphinx_smarty_pants(t):
    t = t.replace('&quot;', '"')
    t = educate_dashes_oldschool(t)
    t = educate_quotes(t)
    t = t.replace('"', '&quot;')
return t
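# Illustrative example (editor's addition): the wrapper above maps plain
# ASCII quotes and double hyphens to HTML entities, e.g.
#
#     sphinx_smarty_pants('"Hello" -- world')
#     returns '&#8220;Hello&#8221; &#8211; world'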
# Constants for quote education.
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
end_of_word_class = r"""[\s.,;:!?)]"""
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
dec_dashes = r"""&#8211;|&#8212;"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
single_quote_start_re = re.compile(r"""^'(?=%s\\B)""" % (punct_class,))
double_quote_start_re = re.compile(r"""^"(?=%s\\B)""" % (punct_class,))
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
double_quote_sets_re = re.compile(r""""'(?=\w)""")
single_quote_sets_re = re.compile(r"""'"(?=\w)""")
# Special case for decade abbreviations (the '80s):
decade_abbr_re = re.compile(r"""\b'(?=\d{2}s)""")
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
    (
    \s | # a whitespace char, or
    &nbsp; | # a non-breaking space entity, or
    -- | # dashes, or
    &[mn]dash; | # named dash entities
    %s | # or decimal entities
    &\#x201[34]; # or hex
    )
    " # the quote
    (?=\w) # followed by a word character
    """ % (dec_dashes,), re.VERBOSE)
# Double closing quotes:
closing_double_quotes_regex = re.compile(r"""
#(%s)? # character that indicates the quote should be closing
"
(?=%s)
""" % (close_class, end_of_word_class), re.VERBOSE)
closing_double_quotes_regex_2 = re.compile(r"""
(%s) # character that indicates the quote should be closing
"
""" % (close_class,), re.VERBOSE)
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
    (
    \s | # a whitespace char, or
    &nbsp; | # a non-breaking space entity, or
    -- | # dashes, or
    &[mn]dash; | # named dash entities
    %s | # or decimal entities
    &\#x201[34]; # or hex
    )
    ' # the quote
    (?=\w) # followed by a word character
    """ % (dec_dashes,), re.VERBOSE)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(?!\s | s\b | \d)
""" % (close_class,), re.VERBOSE)
closing_single_quotes_regex_2 = re.compile(r"""
(%s)
'
(\s | s\b)
""" % (close_class,), re.VERBOSE)
def educate_quotes(s):
    """
    Parameter: String.
    Returns: The string, with "educated" curly quote HTML entities.
    Example input: "Isn't this fun?"
    Example output: &#8220;Isn&#8217;t this fun?&#8221;
    """
    # Special case if the very first character is a quote
    # followed by punctuation at a non-word-break. Close the quotes
    # by brute force:
    s = single_quote_start_re.sub("&#8217;", s)
    s = double_quote_start_re.sub("&#8221;", s)
    # Special case for double sets of quotes, e.g.:
    # <p>He said, "'Quoted' words in a larger quote."</p>
    s = double_quote_sets_re.sub("&#8220;&#8216;", s)
    s = single_quote_sets_re.sub("&#8216;&#8220;", s)
    # Special case for decade abbreviations (the '80s):
    s = decade_abbr_re.sub("&#8217;", s)
    s = opening_single_quotes_regex.sub(r"\1&#8216;", s)
    s = closing_single_quotes_regex.sub(r"\1&#8217;", s)
    s = closing_single_quotes_regex_2.sub(r"\1&#8217;\2", s)
    # Any remaining single quotes should be opening ones:
    s = s.replace("'", "&#8216;")
    s = opening_double_quotes_regex.sub(r"\1&#8220;", s)
    s = closing_double_quotes_regex.sub(r"&#8221;", s)
    s = closing_double_quotes_regex_2.sub(r"\1&#8221;", s)
    # Any remaining quotes should be opening ones.
    return s.replace('"', "&#8220;")
def educate_quotes_latex(s, dquotes=("``", "''")):
"""
Parameter: String.
Returns: The string, with double quotes corrected to LaTeX quotes.
Example input: "Isn't this fun?"
Example output: ``Isn't this fun?'';
"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes
# by brute force:
s = single_quote_start_re.sub("\x04", s)
s = double_quote_start_re.sub("\x02", s)
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
s = double_quote_sets_re.sub("\x01\x03", s)
s = single_quote_sets_re.sub("\x03\x01", s)
# Special case for decade abbreviations (the '80s):
s = decade_abbr_re.sub("\x04", s)
s = opening_single_quotes_regex.sub("\\1\x03", s)
s = closing_single_quotes_regex.sub("\\1\x04", s)
s = closing_single_quotes_regex_2.sub("\\1\x04\\2", s)
# Any remaining single quotes should be opening ones:
s = s.replace("'", "\x03")
s = opening_double_quotes_regex.sub("\\1\x01", s)
s = closing_double_quotes_regex.sub("\x02", s)
s = closing_double_quotes_regex_2.sub("\\1\x02", s)
# Any remaining quotes should be opening ones.
s = s.replace('"', "\x01")
# Finally, replace all helpers with quotes.
return s.replace("\x01", dquotes[0]).replace("\x02", dquotes[1]).\
replace("\x03", "`").replace("\x04", "'")
def educate_backticks(s):
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
translated into curly quote characters.
Example input: ``Isn't this fun?''
Example output: “Isn't this fun?”
"""
return s.replace("``", "“").replace("''", "”")
def educate_single_backticks(s):
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
translated into curly quote characters.
Example input: `Isn't this fun?'
Example output: ‘Isn’t this fun?’
"""
return s.replace('`', "‘").replace("'", "’")
def educate_dashes_oldschool(s):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an en-dash character, and each "---" translated to
an em-dash character.
"""
return s.replace('---', "—").replace('--', "–")
def educate_dashes_oldschool_inverted(s):
"""
Parameter: String.
Returns: The string, with each instance of "--" translated to
an em-dash character, and each "---" translated to
an en-dash character. Two reasons why: First, unlike the
en- and em-dash syntax supported by
educate_dashes_oldschool(), it's compatible with existing
entries written before SmartyPants 1.1, back when "--" was
only used for em-dashes. Second, em-dashes are more
common than en-dashes, and so it sort of makes sense that
the shortcut should be shorter to type. (Thanks to Aaron
Swartz for the idea.)
"""
return s.replace('---', "–").replace('--', "—")
def educate_ellipses(s):
"""
Parameter: String.
Returns: The string, with each instance of "..." translated to
an ellipsis character.
Example input: Huh...?
Example output: Huh…?
"""
return s.replace('...', "…").replace('. . .', "…")
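# Example of chaining the helpers above (the input text is illustrative):
#
# text = 'Well -- "that is odd..." she said.'
# text = educate_dashes_oldschool(text) # "--" becomes an en dash
# text = educate_ellipses(text) # "..." becomes an ellipsis
# text = educate_quotes(text) # straight quotes become curly quotes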
| mit | 1,064,108,085,081,027,300 | 36.264214 | 79 | 0.599174 | false |
Beeblio/django | tests/swappable_models/tests.py | 59 | 2156 | from __future__ import unicode_literals
from django.utils.six import StringIO
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.test import TestCase, override_settings
from swappable_models.models import Article
class SwappableModelTests(TestCase):
available_apps = [
'swappable_models',
'django.contrib.auth',
'django.contrib.contenttypes',
]
@override_settings(TEST_ARTICLE_MODEL='swappable_models.AlternateArticle')
def test_generated_data(self):
"Permissions and content types are not created for a swapped model"
# Delete all permissions and content_types
Permission.objects.filter(content_type__app_label='swappable_models').delete()
ContentType.objects.filter(app_label='swappable_models').delete()
# Re-run migrate. This will re-build the permissions and content types.
new_io = StringIO()
management.call_command('migrate', load_initial_data=False, interactive=False, stdout=new_io)
# Check that content types and permissions exist for the swapped model,
# but not for the swappable model.
apps_models = [(p.content_type.app_label, p.content_type.model)
for p in Permission.objects.all()]
self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
self.assertNotIn(('swappable_models', 'article'), apps_models)
apps_models = [(ct.app_label, ct.model)
for ct in ContentType.objects.all()]
self.assertIn(('swappable_models', 'alternatearticle'), apps_models)
self.assertNotIn(('swappable_models', 'article'), apps_models)
@override_settings(TEST_ARTICLE_MODEL='swappable_models.article')
def test_case_insensitive(self):
"Model names are case insensitive. Check that model swapping honors this."
try:
Article.objects.all()
except AttributeError:
self.fail('Swappable model names should be case insensitive.')
self.assertIsNone(Article._meta.swapped)
| bsd-3-clause | 5,159,018,456,522,786,000 | 39.679245 | 101 | 0.688776 | false |
sunqm/pyscf | pyscf/gto/basis/faegre_dz.py | 2 | 2441 | Br = [[0, -1, [1852191.95,1]], [0, -1, [277468.432,1]], [0, -1, [63146.2222,1]],
[0, -1, [17882.2426,1]], [0, -1, [5831.1276,1]], [0, -1, [2102.97002,1]],
[0, -1, [818.081869,1]], [0, -1, [337.178276,1]], [0, -1, [144.372116,1]],
[0, -1, [59.4416183,1]], [0, -1, [27.8399271,1]], [0, -1, [12.7957627,1]],
[0, -1, [4.49404245,1]], [0, -1, [1.94533783,1]], [0, -1, [0.907029776,1]],
[0, -1, [0.4229101,1]], [0, -1, [0.158834312,1]], [0, -1, [0.0529447707,1]],
[1, 0, [5679.727,1]], [1, 0, [1346.36352,1]], [1, 0, [435.395818,1]],
[1, 0, [164.793216,1]], [1, 0, [68.5017712,1]], [1, 0, [30.1368639,1]],
[1, 0, [13.6468981,1]], [1, 0, [5.82038047,1]], [1, 0, [2.45254721,1]],
[1, 0, [0.900766868,1]], [1, 0, [0.346830479,1]], [1, 0, [0.117653551,1]],
[1, 0, [0.0392178503,1]],
[2, 0, [403.141938,1]], [2, 0, [120.994865,1]], [2, 0, [46.2907462,1]],
[2, 0, [19.7020627,1]], [2, 0, [8.85441922,1]], [2, 0, [3.99399609,1]],
[2, 0, [1.76371836,1]], [2, 0, [0.706407907,1]], [2, 0, [0.235469302,1]], ]
I = [[0, -1, [16319390.1,1]], [0, -1, [2443812.29,1]], [0, -1, [556146.045,1]],
[0, -1, [157521.745,1]], [0, -1, [51386.5851,1]], [0, -1, [18549.0727,1]],
[0, -1, [7233.10747,1]], [0, -1, [2998.65553,1]], [0, -1, [1306.46816,1]],
[0, -1, [592.591389,1]], [0, -1, [275.524546,1]], [0, -1, [118.862916,1]],
[0, -1, [60.0487757,1]], [0, -1, [29.9822303,1]], [0, -1, [13.0601974,1]],
[0, -1, [6.62773403,1]], [0, -1, [2.60128989,1]], [0, -1, [1.22807917,1]],
[0, -1, [0.606110391,1]], [0, -1, [0.299141794,1]], [0, -1, [0.117829615,1]],
[0, -1, [0.047131846,1]],
[1, 0, [37028.0579,1]], [1, 0, [8769.75781,1]], [1, 0, [2847.26606,1]],
[1, 0, [1087.66677,1]], [1, 0, [460.509647,1]], [1, 0, [209.214159,1]],
[1, 0, [99.7061331,1]], [1, 0, [48.9875865,1]], [1, 0, [23.8320263,1]],
[1, 0, [11.9123581,1]], [1, 0, [5.98212311,1]], [1, 0, [2.85353951,1]],
[1, 0, [1.32214681,1]], [1, 0, [0.497164082,1]], [1, 0, [0.209955502,1]],
[1, 0, [0.0802605438,1]], [1, 0, [0.0321042175,1]],
[2, 0, [1721.59127,1]], [2, 0, [519.762117,1]], [2, 0, [202.358187,1]],
[2, 0, [88.9255359,1]], [2, 0, [41.9418042,1]], [2, 0, [20.4459351,1]],
[2, 0, [10.1793363,1]], [2, 0, [4.99896672,1]], [2, 0, [2.39173872,1]],
[2, 0, [1.10127739,1]], [2, 0, [0.454143728,1]], [2, 0, [0.181657491,1]], ]
| apache-2.0 | 3,826,817,444,269,580,300 | 70.794118 | 83 | 0.45637 | false |
DavidNorman/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py | 4 | 4273 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndFilterFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _map_and_filter_fusion_test_cases():
"""Generates test cases for the MapAndFilterFusion optimization."""
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
is_even = lambda x: math_ops.equal(x % 2, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
functions = [identity, increment, minus_five, increment_and_square]
filters = [take_all, is_zero, is_even, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
# Multi output
tests.append(("Multi1", lambda x: (x, x),
lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
@test_util.run_all_in_graph_and_eager_modes
class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
def _testMapAndFilter(self, dataset, function, predicate):
expected_output = []
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if self.evaluate(b):
expected_output.append(r)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@parameterized.named_parameters(*_map_and_filter_fusion_test_cases())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["Map", "Filter",
"Map"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
def testCapturedInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We are currently not supporting functions with captured inputs.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["Map",
"Filter"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
if __name__ == "__main__":
test.main()
| apache-2.0 | -9,201,176,295,065,049,000 | 37.151786 | 80 | 0.689679 | false |
vyrus/wubi | src/openpgp/sap/pkt/CompressedData.py | 9 | 3968 | """Compressed Data RFC 2440.2.3, 2440.5.6
A compressed data packet simply contains OpenPGP message data in a
single packet. In other words, a signature, public key, signed
message or any other valid message type (see rfc2440 10.2) can be
encapsulated in a single compressed data packet. Normally, all this
data is, um, compressed ..but it is also permissible to package
uncompressed data in the "compressed" data packet.
Operations on a compressed data packet normally follow this pattern:
1. decompress compressed data (if necessary)
2. re-evaluate data as an OpenPGP message
3. repeat as needed for nested compressed data packets
Decompressing Data
The compressed data in `_comp_d` is decompressed when the body is
filled, and the result is stored on the `data` attribute. Repeated
reads of compressed_data_obj.data therefore return the cached,
already-decompressed string without re-running the decompression
step.
"""
import zlib
from Packet import Packet
from openpgp.code import *
class CompressedData(Packet):
__doc__ = """Compressed Data Packet
""" + Packet._ivars
def __init__(self, *args, **kwords):
try:
self.fill(args[0])
except IndexError:
pass
def fill_body(self, d):
self.body = CompressedDataBody(d)
class CompressedDataBody:
"""Compressed Data
:IVariables:
- `alg`: integer compression algorithm code
- `data`: decompressed data
- `_d`: string of raw packet body data
- `_comp_d`: string of compressed data (_d[1:])
"""
def __init__(self, *args, **kwords):
try:
self.fill(args[0])
except IndexError:
pass
#def __getattr__(self, name):
# I don't think that decompression is something that needs to be done
# automatically.
# if 'data' == name:
# return self.__decompress()
# else:
# return self.__dict__[name]
def fill(self, d):
self._d = d
self.alg = ord(d[0])
self._comp_d = d[1:]
self.data = self.decompress()
def decompress(self):
if COMP_UNCOMPRESSED == self.alg:
data = self._comp_d
# From 5.6: PGP2.6 uses 13 bits of compression ~ too bad
elif COMP_ZIP == self.alg: # ..from zipfile.py source
dc = zlib.decompressobj(-15)
bytes = dc.decompress(self._comp_d)
ex = dc.decompress('Z') + dc.flush()
if ex:
bytes = bytes + ex
data = bytes
elif COMP_ZLIB == self.alg:
dc = zlib.decompressobj()
data = dc.decompress(self._comp_d)
# 100-110:Private/Experimental
#elif self.alg in range(100, 111):
# raise NotImplementedError("Unsupported compression algorithm->(%s)" % (str(self.alg)))
else:
raise NotImplementedError("Unsupported compression algorithm->(%s)" % self.alg)
return data
def create_CompressedDataBody(alg, d):
"""Create a CompressedDataBody instance.
:Parameters:
- `alg`: integer compressed algorithm constant
- `d`: string data to compress
:Returns: `CompressedDataBody` instance
"""
if COMP_ZIP == alg: # ..from zipfile.py source
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
data = cmpr.compress(d)
ex = cmpr.flush()
if ex:
data = data + ex
elif COMP_ZLIB == alg:
cmpr = zlib.compressobj()
data = cmpr.compress(d)
ex = cmpr.flush()
if ex:
data = data + ex
elif COMP_UNCOMPRESSED == alg:
data = d
else:
raise NotImplementedError("Unsupported compression algorithm->(%s)" % alg)
return CompressedDataBody(''.join([chr(alg), data]))
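# Illustrative round trip (``message_data`` stands for any serialized
# OpenPGP message string, so the value is hypothetical):
#
# body = create_CompressedDataBody(COMP_ZLIB, message_data)
# assert body.data == message_data # decompressed once during fill()
# reparsed = CompressedDataBody(body._d) # rebuild from the raw body bytes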
| gpl-2.0 | -3,095,134,076,281,938,400 | 32.066667 | 99 | 0.613155 | false |
EarthSystemCoG/COG | filebrowser/settings.py | 2 | 7031 | # coding: utf-8
# imports
import os
# django imports
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# settings for django-tinymce
#FIXME: remove the tinymce content, we no longer use it.
try:
import tinymce.settings
DEFAULT_URL_TINYMCE = tinymce.settings.JS_BASE_URL + '/'
DEFAULT_PATH_TINYMCE = tinymce.settings.JS_ROOT + '/'
except ImportError:
DEFAULT_URL_TINYMCE = settings.STATIC_URL + "grappelli/tinymce/jscripts/tiny_mce/"
DEFAULT_PATH_TINYMCE = os.path.join(settings.STATIC_ROOT, 'grappelli/tinymce/jscripts/tiny_mce/')
# PATH AND URL SETTINGS
# Main Media Settings
# WARNING: FILEBROWSER_MEDIA_ROOT and FILEBROWSER_MEDIA_URL will be removed in the next major release of Filebrowser.
# Read the documentation on FileBrowser's storages
# (http://readthedocs.org/docs/django-filebrowser/en/latest/file_storages.html)
MEDIA_ROOT = getattr(settings, "FILEBROWSER_MEDIA_ROOT", settings.MEDIA_ROOT)
MEDIA_URL = getattr(settings, "FILEBROWSER_MEDIA_URL", settings.MEDIA_URL)
# Main FileBrowser Directory. This has to be a directory within MEDIA_ROOT.
# Leave empty in order to browse all files under MEDIA_ROOT.
# DO NOT USE A SLASH AT THE BEGINNING, DO NOT FORGET THE TRAILING SLASH AT THE END.
DIRECTORY = getattr(settings, "FILEBROWSER_DIRECTORY", 'uploads/')
# The URL/PATH to your filebrowser media-files.
URL_FILEBROWSER_MEDIA = getattr(settings, "FILEBROWSER_URL_FILEBROWSER_MEDIA", os.path.join(settings.STATIC_URL,
'filebrowser/'))
PATH_FILEBROWSER_MEDIA = getattr(settings, "FILEBROWSER_PATH_FILEBROWSER_MEDIA", os.path.join(settings.STATIC_ROOT,
'filebrowser/'))
# The URL/PATH to your TinyMCE Installation.
URL_TINYMCE = getattr(settings, "FILEBROWSER_URL_TINYMCE", DEFAULT_URL_TINYMCE)
PATH_TINYMCE = getattr(settings, "FILEBROWSER_PATH_TINYMCE", DEFAULT_PATH_TINYMCE)
# EXTENSIONS AND FORMATS
# Allowed Extensions for File Upload. Lower case is important.
# Types that are coded as Image have a size selector in the File Browser.
EXTENSIONS = getattr(settings, "FILEBROWSER_EXTENSIONS", {
'Folder': [''],
'Image': ['.jpg', '.jpeg', '.gif', '.png', '.tif', '.tiff', '.pdf'],
'Document': ['.pdf', '.pptx', '.ppt', '.rtf', '.txt', '.xls', '.xlsx', '.doc', '.docx', '.csv'],
'Video': ['.mov', '.wmv', '.mpeg', '.mpg', '.avi', '.rm'],
'Audio': ['.mp3', '.mp4', '.wav', '.aiff', '.midi', '.m4p']
})
# Define different formats for allowed selections.
# This has to be a subset of EXTENSIONS.
# e.g., add ?type=image to the browse-URL ...
# including Document as an image enables them to be uploaded via the Link Browser->File Browser in the wiki.
SELECT_FORMATS = getattr(settings, "FILEBROWSER_SELECT_FORMATS", {
'file': ['Folder', 'Image', 'Document', 'Video', 'Audio'],
'image': ['Image', 'Document'],
'document': ['Document'],
'media': ['Video', 'Audio'],
})
# VERSIONS
# Directory to Save Image Versions (and Thumbnails). Relative to MEDIA_ROOT.
# If no directory is given, versions are stored within the Image directory.
# VERSION URL: VERSIONS_BASEDIR/original_path/originalfilename_versionsuffix.extension
VERSIONS_BASEDIR = getattr(settings, 'FILEBROWSER_VERSIONS_BASEDIR', '')
# Versions Format. Available Attributes: verbose_name, width, height, opts
VERSIONS = getattr(settings, "FILEBROWSER_VERSIONS", {
'admin_thumbnail': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'},
'thumbnail': {'verbose_name': 'Thumbnail (1 col)', 'width': 60, 'height': 60, 'opts': 'crop'},
'small': {'verbose_name': 'Small (2 col)', 'width': 140, 'height': '', 'opts': ''},
'medium': {'verbose_name': 'Medium (4col )', 'width': 300, 'height': '', 'opts': ''},
'big': {'verbose_name': 'Big (6 col)', 'width': 460, 'height': '', 'opts': ''},
'large': {'verbose_name': 'Large (8 col)', 'width': 680, 'height': '', 'opts': ''},
})
# Quality of saved versions
VERSION_QUALITY = getattr(settings, 'FILEBROWSER_VERSION_QUALITY', 90)
# Versions available within the Admin-Interface.
ADMIN_VERSIONS = getattr(settings, 'FILEBROWSER_ADMIN_VERSIONS', ['thumbnail', 'small', 'medium', 'big', 'large'])
# Which Version should be used as Admin-thumbnail.
ADMIN_THUMBNAIL = getattr(settings, 'FILEBROWSER_ADMIN_THUMBNAIL', 'admin_thumbnail')
# PLACEHOLDER
# Path to placeholder image
PLACEHOLDER = getattr(settings, "FILEBROWSER_PLACEHOLDER", "")
# Show Placeholder if the original image does not exist
SHOW_PLACEHOLDER = getattr(settings, "FILEBROWSER_SHOW_PLACEHOLDER", False)
# Always show placeholder (even if the original image exists)
FORCE_PLACEHOLDER = getattr(settings, "FILEBROWSER_FORCE_PLACEHOLDER", False)
# EXTRA SETTINGS
# If set to True, the FileBrowser will not try to import a mis-installed PIL.
STRICT_PIL = getattr(settings, 'FILEBROWSER_STRICT_PIL', False)
# PIL's Error "Suspension not allowed here" work around:
# s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html
IMAGE_MAXBLOCK = getattr(settings, 'FILEBROWSER_IMAGE_MAXBLOCK', 1024*1024)
# Exclude files matching any of the following regular expressions
# Default is to exclude 'thumbnail' style naming of image-thumbnails.
EXTENSION_LIST = []
for exts in EXTENSIONS.values():
EXTENSION_LIST += exts
EXCLUDE = getattr(settings, 'FILEBROWSER_EXCLUDE', (r'_(%(exts)s)_.*_q\d{1,3}\.(%(exts)s)'
% {'exts': ('|'.join(EXTENSION_LIST))},))
# Max. Upload Size in Bytes.
MAX_UPLOAD_SIZE = getattr(settings, "FILEBROWSER_MAX_UPLOAD_SIZE", 10485760)
# Normalize filename and remove all non-alphanumeric characters
# except for underscores, spaces & dashes.
NORMALIZE_FILENAME = getattr(settings, "FILEBROWSER_NORMALIZE_FILENAME", False)
# Convert Filename (replace spaces and convert to lowercase)
CONVERT_FILENAME = getattr(settings, "FILEBROWSER_CONVERT_FILENAME", True)
# Max. Entries per Page
# Loading a Sever-Directory with lots of files might take a while
# Use this setting to limit the items shown
LIST_PER_PAGE = getattr(settings, "FILEBROWSER_LIST_PER_PAGE", 50)
# Default Sorting
# Options: date, filesize, filename_lower, filetype_checked
DEFAULT_SORTING_BY = getattr(settings, "FILEBROWSER_DEFAULT_SORTING_BY", "date")
# Sorting Order: asc, desc
DEFAULT_SORTING_ORDER = getattr(settings, "FILEBROWSER_DEFAULT_SORTING_ORDER", "desc")
# regex to clean dir names before creation
FOLDER_REGEX = getattr(settings, "FILEBROWSER_FOLDER_REGEX", r'^[\w._\ /-]+$')
# Traverse directories when searching
SEARCH_TRAVERSE = getattr(settings, "FILEBROWSER_SEARCH_TRAVERSE", False)
# Default Upload and Version Permissions
DEFAULT_PERMISSIONS = getattr(settings, "FILEBROWSER_DEFAULT_PERMISSIONS", 0755)
# EXTRA TRANSLATION STRINGS
# The following strings are not availabe within views or templates
_('Folder')
_('Image')
_('Video')
_('Document')
_('Audio')
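# Illustrative overrides; these belong in the project's own settings module
# (values are examples only):
#
# FILEBROWSER_DIRECTORY = 'uploads/images/'
# FILEBROWSER_MAX_UPLOAD_SIZE = 20 * 1024 * 1024 # 20 MB
# FILEBROWSER_DEFAULT_SORTING_BY = 'filename_lower'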
| bsd-3-clause | -6,834,304,391,566,928,000 | 51.864662 | 117 | 0.696914 | false |
hogarthj/ansible | test/units/modules/network/f5/test_bigip_snmp_trap.py | 23 | 6579 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.compat.tests.mock import DEFAULT
from ansible.module_utils.basic import AnsibleModule
try:
from library.bigip_snmp_trap import NetworkedParameters
from library.bigip_snmp_trap import NonNetworkedParameters
from library.bigip_snmp_trap import ModuleManager
from library.bigip_snmp_trap import NetworkedManager
from library.bigip_snmp_trap import NonNetworkedManager
from library.bigip_snmp_trap import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_snmp_trap import NetworkedParameters
from ansible.modules.network.f5.bigip_snmp_trap import NonNetworkedParameters
from ansible.modules.network.f5.bigip_snmp_trap import ModuleManager
from ansible.modules.network.f5.bigip_snmp_trap import NetworkedManager
from ansible.modules.network.f5.bigip_snmp_trap import NonNetworkedManager
from ansible.modules.network.f5.bigip_snmp_trap import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_networked_parameters(self):
args = dict(
name='foo',
snmp_version='1',
community='public',
destination='10.10.10.10',
port=1000,
network='other',
password='password',
server='localhost',
user='admin'
)
p = NetworkedParameters(params=args)
assert p.name == 'foo'
assert p.snmp_version == '1'
assert p.community == 'public'
assert p.destination == '10.10.10.10'
assert p.port == 1000
assert p.network == 'other'
def test_module_non_networked_parameters(self):
args = dict(
name='foo',
snmp_version='1',
community='public',
destination='10.10.10.10',
port=1000,
network='other',
password='password',
server='localhost',
user='admin'
)
p = NonNetworkedParameters(params=args)
assert p.name == 'foo'
assert p.snmp_version == '1'
assert p.community == 'public'
assert p.destination == '10.10.10.10'
assert p.port == 1000
assert p.network is None
def test_api_parameters(self):
args = dict(
name='foo',
community='public',
host='10.10.10.10',
network='other',
version=1,
port=1000
)
p = NetworkedParameters(params=args)
assert p.name == 'foo'
assert p.snmp_version == '1'
assert p.community == 'public'
assert p.destination == '10.10.10.10'
assert p.port == 1000
assert p.network == 'other'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_trap(self, *args):
set_module_args(dict(
name='foo',
snmp_version='1',
community='public',
destination='10.10.10.10',
port=1000,
network='other',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_non_networked = Mock(return_value=False)
patches = dict(
create_on_device=DEFAULT,
exists=DEFAULT
)
with patch.multiple(NetworkedManager, **patches) as mo:
mo['create_on_device'].side_effect = Mock(return_value=True)
mo['exists'].side_effect = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['port'] == 1000
assert results['snmp_version'] == '1'
def test_create_trap_non_network(self, *args):
set_module_args(dict(
name='foo',
snmp_version='1',
community='public',
destination='10.10.10.10',
port=1000,
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.is_version_non_networked = Mock(return_value=True)
patches = dict(
create_on_device=DEFAULT,
exists=DEFAULT
)
with patch.multiple(NonNetworkedManager, **patches) as mo:
mo['create_on_device'].side_effect = Mock(return_value=True)
mo['exists'].side_effect = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['port'] == 1000
assert results['snmp_version'] == '1'
| gpl-3.0 | 5,188,336,974,116,207,000 | 31.895 | 91 | 0.612555 | false |
gopal1cloud/neutron | neutron/agent/linux/ovsdb_monitor.py | 5 | 3670 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from neutron.agent.linux import async_process
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class OvsdbMonitor(async_process.AsyncProcess):
"""Manages an invocation of 'ovsdb-client monitor'."""
def __init__(self, table_name, columns=None, format=None,
root_helper=None, respawn_interval=None):
cmd = ['ovsdb-client', 'monitor', table_name]
if columns:
cmd.append(','.join(columns))
if format:
cmd.append('--format=%s' % format)
super(OvsdbMonitor, self).__init__(cmd,
root_helper=root_helper,
respawn_interval=respawn_interval)
def _read_stdout(self):
data = self._process.stdout.readline()
if not data:
return
self._stdout_lines.put(data)
LOG.debug(_('Output received from ovsdb monitor: %s') % data)
return data
def _read_stderr(self):
data = super(OvsdbMonitor, self)._read_stderr()
if data:
LOG.error(_('Error received from ovsdb monitor: %s') % data)
# Do not return value to ensure that stderr output will
# stop the monitor.
class SimpleInterfaceMonitor(OvsdbMonitor):
"""Monitors the Interface table of the local host's ovsdb for changes.
The has_updates() method indicates whether changes to the ovsdb
Interface table have been detected since the monitor started or
since the previous access.
"""
def __init__(self, root_helper=None, respawn_interval=None):
super(SimpleInterfaceMonitor, self).__init__(
'Interface',
columns=['name', 'ofport'],
format='json',
root_helper=root_helper,
respawn_interval=respawn_interval,
)
self.data_received = False
@property
def is_active(self):
return (self.data_received and
self._kill_event and
not self._kill_event.ready())
@property
def has_updates(self):
"""Indicate whether the ovsdb Interface table has been updated.
True will be returned if the monitor process is not active.
This 'failing open' minimizes the risk of falsely indicating
the absence of updates at the expense of potential false
positives.
"""
return bool(list(self.iter_stdout())) or not self.is_active
def start(self, block=False, timeout=5):
super(SimpleInterfaceMonitor, self).start()
if block:
eventlet.timeout.Timeout(timeout)
while not self.is_active:
eventlet.sleep()
def _kill(self, *args, **kwargs):
self.data_received = False
super(SimpleInterfaceMonitor, self)._kill(*args, **kwargs)
def _read_stdout(self):
data = super(SimpleInterfaceMonitor, self)._read_stdout()
if data and not self.data_received:
self.data_received = True
return data
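# Illustrative usage sketch (the calling code shown here is hypothetical):
#
# monitor = SimpleInterfaceMonitor(root_helper='sudo', respawn_interval=30)
# monitor.start(block=True)
# ...
# if monitor.has_updates:
#     resync_ports() # e.g. have the agent rescan its ports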
| apache-2.0 | 8,525,257,399,254,136,000 | 33.952381 | 78 | 0.622071 | false |
gurumaia/dynatrace-slack-bot | dynabot.py | 1 | 2339 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import requests
from requests.auth import HTTPBasicAuth
from slackclient import SlackClient
import dynatrace
import dashboards
# Dynatrace Bot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
# constants
AT_BOT = "<@" + BOT_ID + ">"
# instantiate Slack client
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
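# The dashboards module is expected to map a command keyword to a dashboard
# definition; handle_command() below reads the 'function', 'name' and
# 'timeframe' keys. The keyword and values shown here are illustrative only:
#
# dashboards = {
#     'efficiency': {'name': 'Efficiency Dashboard',
#                    'timeframe': 'last30Days',
#                    'function': dynatrace.get_dashboard},
# }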
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
response = "I'm not sure what you mean. Can you try again? Here's a list of available options:```"+str(dashboards.dashboards.keys())+'```'
for dash in dashboards.dashboards.keys():
if dash in command.lower():
data = dashboards.dashboards[dash]['function'](dashboards.dashboards[dash]['name'],dashboards.dashboards[dash]['timeframe'])
response = '```'+data+'```'
break
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("Dynatrace Bot connected and running!")
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
| gpl-3.0 | -7,732,812,770,964,779,000 | 34.984615 | 142 | 0.63959 | false |
demonchild2112/travis-test | grr/core/grr_response_core/version.py | 2 | 1239 | #!/usr/bin/env python
"""GRR Rapid Response Framework."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import configparser
from grr_response_core.lib import package
def VersionPath():
"""Returns a path to version.ini."""
# Try to get a version.ini. It should be in the resources if the code
# was packed with "pip sdist". It will be 2 levels up from grr_response_core
# if the code was installed via "pip install -e".
version_ini = (
package.ResourcePath("grr-response-core", "version.ini") or
package.ResourcePath("grr-response-core", "../../version.ini"))
if not os.path.exists(version_ini):
raise RuntimeError("Can't find version.ini at %s" % version_ini)
return version_ini
def Version():
"""Return a dict with GRR version information."""
version_ini = VersionPath()
config = configparser.SafeConfigParser()
config.read(version_ini)
return dict(
packageversion=config.get("Version", "packageversion"),
major=config.getint("Version", "major"),
minor=config.getint("Version", "minor"),
revision=config.getint("Version", "revision"),
release=config.getint("Version", "release"))
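# version.ini is expected to provide a [Version] section; the numbers below
# are illustrative only:
#
# [Version]
# packageversion = 3.4.0
# major = 3
# minor = 4
# revision = 0
# release = 1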
| apache-2.0 | -3,116,742,367,454,153,700 | 27.813953 | 78 | 0.694108 | false |
avneesh91/django | django/db/transaction.py | 10 | 11230 | from contextlib import ContextDecorator
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
class TransactionManagementError(ProgrammingError):
"""Transaction management is used improperly."""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""Get the autocommit status of the connection."""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""Set the autocommit status of the connection."""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""Commit a transaction."""
get_connection(using).commit()
def rollback(using=None):
"""Roll back a transaction."""
get_connection(using).rollback()
def savepoint(using=None):
"""
Create a savepoint (if supported and required by the backend) inside the
current transaction. Return an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Roll back the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commit the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""Get the "needs rollback" flag -- for *advanced use* only."""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, trigger a rollback when exiting the innermost
enclosing atomic block that has `savepoint=True` (that's the default). Use
this to force a rollback without raising an exception.
When `rollback` is `False`, prevent such a rollback. Use this only after
rolling back to a known-good state! Otherwise, you break the atomic block
and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func)
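# Illustrative usage (the model instance and callback are hypothetical); the
# callback only runs if the enclosing atomic block commits:
#
# with atomic():
#     order.save()
#     on_commit(lambda: notify_warehouse(order.pk))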
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
Guarantee the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(False, force_begin_transaction_with_broken_autocommit=True)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
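# Illustrative usage (model, function and database alias names are
# hypothetical):
#
# @atomic
# def transfer(source, target, amount):
#     source.withdraw(amount)
#     target.deposit(amount)
#
# with atomic(using='replica', savepoint=False):
#     ... # statements that commit or roll back as a unit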
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
| bsd-3-clause | 5,562,153,189,974,124,000 | 37.858131 | 97 | 0.589938 | false |
Suwmlee/XX-Net | Python3/lib/contextlib.py | 8 | 12261 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack",
"redirect_stdout", "redirect_stderr", "suppress"]
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
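# Illustrative subclass (hypothetical) showing the decorator behaviour that
# ContextDecorator adds on top of a plain context manager:
#
# class logged(ContextDecorator):
#     def __enter__(self):
#         print('entering')
#         return self
#     def __exit__(self, *exc):
#         print('leaving')
#         return False
#
# @logged()
# def do_work():
#     ... # runs inside the context manager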
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, args, kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, self.args, self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed.
return exc is not value
except RuntimeError as exc:
# Likewise, avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479).
if exc.__cause__ is value:
return False
raise
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, args, kwds)
return helper
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class _RedirectStream:
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
"""Context manager for temporarily redirecting stdout to another file.
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
_stream = "stdout"
class redirect_stderr(_RedirectStream):
"""Context manager for temporarily redirecting stderr to another file."""
_stream = "stderr"
class suppress:
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume its a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
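# Illustrative ExitStack idiom (function and file names are hypothetical):
#
# def open_files(names):
#     """Open several files; keep them open only if every open succeeds."""
#     with ExitStack() as stack:
#         files = [stack.enter_context(open(name)) for name in names]
#         # All opens succeeded: detach the callbacks so the caller owns them.
#         stack.pop_all()
#         return files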
| bsd-2-clause | -1,798,120,594,885,289,700 | 32.963989 | 80 | 0.580621 | false |
thatch45/svt | setup.py | 1 | 1490 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Import python libs
import os
import sys
import shutil
if 'USE_SETUPTOOLS' in os.environ or 'setuptools' in sys.modules:
from setuptools import setup
from setuptools import Command
else:
from distutils.core import setup
from distutils.core import Command
NAME = 'svt'
DESC = ('Standalone port of the salt-vt terminal emulation system')
# Version info -- read without importing
_locals = {}
with open('svt/version.py') as fp:
exec(fp.read(), None, _locals)
VERSION = _locals['version']
class Clean(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for subdir in ('svt',):
for root, dirs, files in os.walk(os.path.join(os.path.dirname(__file__), subdir)):
for dir_ in dirs:
if dir_ == '__pycache__':
shutil.rmtree(os.path.join(root, dir_))
setup(name=NAME,
author='Thomas S Hatch',
author_email='[email protected]',
version=VERSION,
description=DESC,
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
],
packages=[
'svt',
],
cmdclass={'clean': Clean},
)
| apache-2.0 | -1,472,699,939,748,658,200 | 24.689655 | 94 | 0.589262 | false |
dracos/QGIS | python/plugins/processing/algs/taudem/dinfdistup_multi.py | 7 | 4893 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dinfdistup_multi.py
---------------------
Date : March 2015
Copyright : (C) 2015 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'March 2015'
__copyright__ = '(C) 2015, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputDirectory
from TauDEMUtils import TauDEMUtils
class DinfDistUpMulti(GeoAlgorithm):
DINF_FLOW_DIR_GRID = 'DINF_FLOW_DIR_GRID'
PIT_FILLED_GRID = 'PIT_FILLED_GRID'
SLOPE_GRID = 'SLOPE_GRID'
THRESHOLD = 'THRESHOLD'
STAT_METHOD = 'STAT_METHOD'
DIST_METHOD = 'DIST_METHOD'
EDGE_CONTAM = 'EDGE_CONTAM'
DIST_UP_GRID = 'DIST_UP_GRID'
STATISTICS = ['Minimum', 'Maximum', 'Average']
STAT_DICT = {0: 'min', 1: 'max', 2: 'ave'}
DISTANCE = ['Pythagoras', 'Horizontal', 'Vertical', 'Surface']
DIST_DICT = {
0: 'p',
1: 'h',
2: 'v',
3: 's',
}
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../../images/taudem.png')
def defineCharacteristics(self):
self.name = 'D-Infinity Distance Up (multifile)'
self.cmdName = 'dinfdistup'
self.group = 'Specialized Grid Analysis tools'
self.addParameter(ParameterFile(self.DINF_FLOW_DIR_GRID,
self.tr('D-Infinity Flow Direction Grid'), True, False))
self.addParameter(ParameterFile(self.PIT_FILLED_GRID,
self.tr('Pit Filled Elevation Grid'), True, False))
self.addParameter(ParameterFile(self.SLOPE_GRID,
self.tr('Slope Grid'), True, False))
self.addParameter(ParameterSelection(self.STAT_METHOD,
self.tr('Statistical Method'), self.STATISTICS, 2))
self.addParameter(ParameterSelection(self.DIST_METHOD,
self.tr('Distance Method'), self.DISTANCE, 1))
self.addParameter(ParameterNumber(self.THRESHOLD,
self.tr('Proportion Threshold'), 0, None, 0.5))
self.addParameter(ParameterBoolean(self.EDGE_CONTAM,
self.tr('Check for edge contamination'), True))
self.addOutput(OutputDirectory(self.DIST_UP_GRID,
self.tr('D-Infinity Distance Up')))
def processAlgorithm(self, progress):
commands = []
commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
if processNum <= 0:
raise GeoAlgorithmExecutionException(
self.tr('Wrong number of MPI processes used. Please set '
'correct number before running TauDEM algorithms.'))
commands.append('-n')
commands.append(str(processNum))
commands.append(os.path.join(TauDEMUtils.taudemMultifilePath(), self.cmdName))
commands.append('-ang')
commands.append(self.getParameterValue(self.DINF_FLOW_DIR_GRID))
commands.append('-fel')
commands.append(self.getParameterValue(self.PIT_FILLED_GRID))
commands.append('-m')
commands.append(str(self.STAT_DICT[self.getParameterValue(
self.STAT_METHOD)]))
commands.append(str(self.DIST_DICT[self.getParameterValue(
self.DIST_METHOD)]))
commands.append('-thresh')
commands.append(str(self.getParameterValue(self.THRESHOLD)))
if not self.getParameterValue(self.EDGE_CONTAM):
commands.append('-nc')
commands.append('-du')
commands.append(self.getOutputValue(self.DIST_UP_GRID))
TauDEMUtils.executeTauDEM(commands, progress)
| gpl-2.0 | 7,269,949,606,276,485,000 | 38.459677 | 86 | 0.601471 | false |
igboyes/virtool | virtool/history/utils.py | 2 | 6175 | import arrow
from typing import Tuple, Union, List
import datetime
import os
import json
import dictdiffer
import aiofiles
def calculate_diff(old: dict, new: dict) -> list:
"""
Calculate the diff for a joined otu document before and after modification.
:param old: the joined otu document before modification
:param new: the joined otu document after modification
:return: the diff
"""
return list(dictdiffer.diff(old, new))
def compose_create_description(document: dict) -> str:
"""
Compose a change description for the creation of a new OTU given its document.
:param document: the OTU document
:return: a change description
"""
name = document["name"]
abbreviation = document.get("abbreviation")
# Build a ``description`` field for the otu creation change document.
description = f"Created {name}"
# Add the abbreviation to the description if there is one.
if abbreviation:
return f"{description} ({abbreviation})"
return description
def compose_edit_description(
name: Union[str, None],
abbreviation: Union[str, None],
old_abbreviation: Union[str, None],
schema: Union[dict, None]
):
"""
Compose a change description for an edit on an existing OTU.
:param name: an updated name value
:param abbreviation: an updated abbreviation value
:param old_abbreviation: the old abbreviation value
:param schema: a new schema `dict`
:return: a change description
"""
description = None
if name:
description = f"Changed name to {name}"
if abbreviation is not None:
# Abbreviation is being removed.
if abbreviation == "" and old_abbreviation:
abbreviation_phrase = f"removed abbreviation {old_abbreviation}"
# Abbreviation is being added where one didn't exist before
elif abbreviation and not old_abbreviation:
abbreviation_phrase = f"added abbreviation {abbreviation}"
# Abbreviation is being changed from one value to another.
else:
abbreviation_phrase = f"changed abbreviation to {abbreviation}"
if description:
description = f"{description} and {abbreviation_phrase}"
else:
description = abbreviation_phrase[:1].upper() + abbreviation_phrase[1:]
if schema is not None:
if description:
description += " and modified schema"
else:
description = "Modified schema"
return description
def compose_remove_description(document: dict) -> str:
"""
Compose a change description for removing an OTU.
:param document: the OTU document that is being removed
:return: a change description
"""
name = document["name"]
abbreviation = document.get("abbreviation")
description = f"Removed {name}"
if abbreviation:
return f"{description} ({abbreviation})"
return description
def derive_otu_information(old: Union[dict, None], new: Union[dict, None]) -> Tuple[str, str, Union[int, str], str]:
"""
Derive OTU information for a new change document from the old and new joined OTU documents.
:param old: the old, joined OTU document
:param new: the new, joined OTU document
    :return: the OTU ID, name, version, and parent reference ID
"""
try:
otu_id = old["_id"]
except TypeError:
otu_id = new["_id"]
try:
otu_name = old["name"]
except TypeError:
otu_name = new["name"]
try:
otu_version = int(new["version"])
except (TypeError, KeyError):
otu_version = "removed"
try:
ref_id = old["reference"]["id"]
except (TypeError, KeyError):
ref_id = new["reference"]["id"]
return otu_id, otu_name, otu_version, ref_id
def join_diff_path(data_path: str, otu_id: str, otu_version: Union[int, str]) -> str:
"""
Derive the path to a diff file based on the application `data_path` setting and the OTU ID and version.
:param data_path: the application data path settings
:param otu_id: the OTU ID to join a diff path for
:param otu_version: the OTU version to join a diff path for
:return: the change path
"""
return os.path.join(data_path, "history", f"{otu_id}_{otu_version}.json")
def json_encoder(o):
"""
A custom JSON encoder function that stores `datetime` objects as ISO format date strings.
:param o: a JSON value object
:return: the object converted to a `datetime` if necessary
"""
if isinstance(o, datetime.datetime):
return arrow.get(o).isoformat()
return o
def json_object_hook(o: dict) -> dict:
"""
A JSON decoder hook for converting `created_at` fields from ISO format dates to `datetime` objects.
:param o: the JSON parsing dict
:return: the parsed dict
"""
for key, value in o.items():
if key == "created_at":
o[key] = arrow.get(value).naive
return o
async def read_diff_file(data_path, otu_id, otu_version):
"""
Read a history diff JSON file.
"""
path = join_diff_path(data_path, otu_id, otu_version)
async with aiofiles.open(path, "r") as f:
return json.loads(await f.read(), object_hook=json_object_hook)
async def remove_diff_files(app, id_list: List[str]):
"""
Remove multiple diff files given a list of change IDs (`id_list`).
:param app: the application object
:param id_list: a list of change IDs to remove diff files for
"""
data_path = app["settings"]["data_path"]
for change_id in id_list:
otu_id, otu_version = change_id.split(".")
path = join_diff_path(
data_path,
otu_id,
otu_version
)
try:
await app["run_in_thread"](os.remove, path)
except FileNotFoundError:
pass
async def write_diff_file(data_path, otu_id, otu_version, body):
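    """
    Write a history diff to a JSON file, encoding datetime values as ISO format strings.
    """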
path = join_diff_path(data_path, otu_id, otu_version)
async with aiofiles.open(path, "w") as f:
json_string = json.dumps(body, default=json_encoder)
await f.write(json_string)
| mit | -5,658,471,045,765,186,000 | 26.444444 | 116 | 0.639676 | false |
trabacus-softapps/openerp-8.0-cc | openerp/addons/project_timesheet/__openerp__.py | 6 | 2283 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bill Time on Tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
Synchronization of project task work entries with timesheet entries.
====================================================================
This module lets you transfer the entries under tasks defined for Project
Management to the Timesheet line entries for particular date and particular user
with the effect of creating, editing and deleting either ways.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/invoice_task_work.jpeg', 'images/my_timesheet.jpeg', 'images/working_hour.jpeg'],
'depends': ['resource', 'project', 'hr_timesheet_sheet', 'hr_timesheet_invoice', 'account_analytic_analysis', 'procurement'],
'data': [
'security/ir.model.access.csv',
'security/project_timesheet_security.xml',
'process/project_timesheet_process.xml',
'report/task_report_view.xml',
'project_timesheet_view.xml',
],
'demo': ['project_timesheet_demo.xml'],
'test': [
'test/worktask_entry_to_timesheetline_entry.yml',
'test/work_timesheet.yml',
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,332,601,086,707,830,000 | 42.075472 | 129 | 0.61717 | false |
liangtianyou/ST | stserver/libcommon/web/utils.py | 2 | 40122 | #!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
__all__ = [
"Storage", "storage", "storify",
"Counter", "counter",
"iters",
"rstrips", "lstrips", "strips",
"safeunicode", "safestr", "utf8",
"TimeoutError", "timelimit",
"Memoize", "memoize",
"re_compile", "re_subm",
"group", "uniq", "iterview",
"IterBetter", "iterbetter",
"safeiter", "safewrite",
"dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
"requeue", "restack",
"listget", "intget", "datestr",
"numify", "denumify", "commify", "dateify",
"nthstr", "cond",
"CaptureStdout", "capturestdout", "Profile", "profile",
"tryall",
"ThreadedDict", "threadeddict",
"autoassign",
"to36",
"safemarkdown",
"sendmail"
]
import re, sys, time, threading, itertools, traceback, os
try:
import subprocess
except ImportError:
subprocess = None
try: import datetime
except ImportError: pass
try: set
except NameError:
from sets import Set as set
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError, k:
raise AttributeError, k
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError, k:
raise AttributeError, k
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
storage = Storage
def storify(mapping, *requireds, **defaults):
"""
Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    `mapping` doesn't have all of the keys in `requireds` and using the default
values for keys found in `defaults`.
For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
`storage({'a':1, 'b':2, 'c':3})`.
If a `storify` value is a list (e.g. multiple values in a form submission),
`storify` returns the last element of the list, unless the key appears in
`defaults` as a list. Thus:
>>> storify({'a':[1, 2]}).a
2
>>> storify({'a':[1, 2]}, a=[]).a
[1, 2]
>>> storify({'a':1}, a=[]).a
[1]
>>> storify({}, a=[]).a
[]
    Similarly, if the value has a `value` attribute, `storify` will return _its_
value, unless the key appears in `defaults` as a dictionary.
>>> storify({'a':storage(value=1)}).a
1
>>> storify({'a':storage(value=1)}, a={}).a
<Storage {'value': 1}>
>>> storify({}, a={}).a
{}
Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.
>>> storify({'x': 'a'}, _unicode=True)
<Storage {'x': u'a'}>
>>> storify({'x': storage(value='a')}, x={}, _unicode=True)
<Storage {'x': <Storage {'value': 'a'}>}>
>>> storify({'x': storage(value='a')}, _unicode=True)
<Storage {'x': u'a'}>
"""
_unicode = defaults.pop('_unicode', False)
def unicodify(s):
if _unicode and isinstance(s, str): return safeunicode(s)
else: return s
def getvalue(x):
if hasattr(x, 'file') and hasattr(x, 'value'):
return x.value
elif hasattr(x, 'value'):
return unicodify(x.value)
else:
return unicodify(x)
stor = Storage()
for key in requireds + tuple(mapping.keys()):
value = mapping[key]
if isinstance(value, list):
if isinstance(defaults.get(key), list):
value = [getvalue(x) for x in value]
else:
value = value[-1]
if not isinstance(defaults.get(key), dict):
value = getvalue(value)
if isinstance(defaults.get(key), list) and not isinstance(value, list):
value = [value]
setattr(stor, key, value)
for (key, value) in defaults.iteritems():
result = value
if hasattr(stor, key):
result = stor[key]
if value == () and not isinstance(result, tuple):
result = (result,)
setattr(stor, key, result)
return stor
class Counter(storage):
"""Keeps count of how many times something is added.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c
<Counter {'y': 1, 'x': 5}>
>>> c.most()
['x']
"""
def add(self, n):
self.setdefault(n, 0)
self[n] += 1
def most(self):
"""Returns the keys with maximum count."""
m = max(self.itervalues())
return [k for k, v in self.iteritems() if v == m]
def least(self):
"""Returns the keys with mininum count."""
m = min(self.itervalues())
return [k for k, v in self.iteritems() if v == m]
def percent(self, key):
"""Returns what percentage a certain key is of all entries.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.percent('x')
0.75
>>> c.percent('y')
0.25
"""
return float(self[key])/sum(self.values())
def sorted_keys(self):
"""Returns keys sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_keys()
['x', 'y']
"""
return sorted(self.keys(), key=lambda k: self[k], reverse=True)
def sorted_values(self):
"""Returns values sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_values()
[2, 1]
"""
return [self[k] for k in self.sorted_keys()]
def sorted_items(self):
"""Returns items sorted by value.
>>> c = counter()
>>> c.add('x')
>>> c.add('x')
>>> c.add('y')
>>> c.sorted_items()
[('x', 2), ('y', 1)]
"""
return [(k, self[k]) for k in self.sorted_keys()]
def __repr__(self):
return '<Counter ' + dict.__repr__(self) + '>'
counter = Counter
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
iters.append(set)
if hasattr(__builtin__, 'frozenset'):
iters.append(set)
if sys.version_info < (2,6): # sets module deprecated in 2.6
try:
from sets import Set
iters.append(Set)
except ImportError:
pass
class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
if direction == 'l':
if text.startswith(remove):
return text[len(remove):]
elif direction == 'r':
if text.endswith(remove):
return text[:-len(remove)]
else:
raise ValueError, "Direction needs to be r or l."
return text
def rstrips(text, remove):
"""
removes the string `remove` from the right of `text`
>>> rstrips("foobar", "bar")
'foo'
"""
return _strips('r', text, remove)
def lstrips(text, remove):
"""
removes the string `remove` from the left of `text`
>>> lstrips("foobar", "foo")
'bar'
"""
return _strips('l', text, remove)
def strips(text, remove):
"""
removes the string `remove` from the both sides of `text`
>>> strips("foobarfoo", "foo")
'bar'
"""
return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
        # user-added fallback: try the requested encoding first, then GBK
        try:
            return obj.decode(encoding)
        except UnicodeDecodeError:
            return obj.decode('GBK')
elif t in [int, float, bool]:
return unicode(obj)
else:
if hasattr(obj, '__unicode__'):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode('utf-8')
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next') and hasattr(obj, '__iter__'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
# for backward-compatibility
utf8 = safestr
class TimeoutError(Exception): pass
def timelimit(timeout):
"""
A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
if it takes longer.
>>> import time
>>> def meaningoflife():
... time.sleep(.2)
... return 42
>>>
>>> timelimit(.1)(meaningoflife)()
Traceback (most recent call last):
...
TimeoutError: took too long
>>> timelimit(1)(meaningoflife)()
42
_Caveat:_ The function isn't stopped after `timeout` seconds but continues
executing in a separate thread. (There seems to be no way to kill a thread.)
inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
"""
def _1(function):
def _2(*args, **kw):
class Dispatch(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = None
self.error = None
self.setDaemon(True)
self.start()
def run(self):
try:
self.result = function(*args, **kw)
except:
self.error = sys.exc_info()
c = Dispatch()
c.join(timeout)
if c.isAlive():
raise TimeoutError, 'took too long'
if c.error:
raise c.error[0], c.error[1]
return c.result
return _2
return _1
class Memoize:
"""
'Memoizes' a function, caching its return values for each input.
If `expires` is specified, values are recalculated after `expires` seconds.
If `background` is specified, values are recalculated in a separate thread.
>>> calls = 0
>>> def howmanytimeshaveibeencalled():
... global calls
... calls += 1
... return calls
>>> fastcalls = memoize(howmanytimeshaveibeencalled)
>>> howmanytimeshaveibeencalled()
1
>>> howmanytimeshaveibeencalled()
2
>>> fastcalls()
3
>>> fastcalls()
3
>>> import time
>>> fastcalls = memoize(howmanytimeshaveibeencalled, .1, background=False)
>>> fastcalls()
4
>>> fastcalls()
4
>>> time.sleep(.2)
>>> fastcalls()
5
>>> def slowfunc():
... time.sleep(.1)
... return howmanytimeshaveibeencalled()
>>> fastcalls = memoize(slowfunc, .2, background=True)
>>> fastcalls()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
6
>>> timelimit(.05)(fastcalls)()
6
>>> time.sleep(.2)
>>> timelimit(.05)(fastcalls)()
7
>>> fastcalls = memoize(slowfunc, None, background=True)
>>> threading.Thread(target=fastcalls).start()
>>> time.sleep(.01)
>>> fastcalls()
9
"""
def __init__(self, func, expires=None, background=True):
self.func = func
self.cache = {}
self.expires = expires
self.background = background
self.running = {}
def __call__(self, *args, **keywords):
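        # One lock per distinct argument tuple so concurrent callers don't
        # recompute the same value; the cache maps each key to (result, timestamp).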
key = (args, tuple(keywords.items()))
if not self.running.get(key):
self.running[key] = threading.Lock()
def update(block=False):
if self.running[key].acquire(block):
try:
self.cache[key] = (self.func(*args, **keywords), time.time())
finally:
self.running[key].release()
if key not in self.cache:
update(block=True)
elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
if self.background:
threading.Thread(target=update).start()
else:
update()
return self.cache[key][0]
memoize = Memoize
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ''
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
compiled_pat = re_compile(pat)
proxy = _re_subm_proxy()
compiled_pat.sub(proxy.__call__, string)
return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
"""
Returns an iterator over a series of lists of length size from iterable.
>>> list(group([1,2,3,4], 2))
[[1, 2], [3, 4]]
>>> list(group([1,2,3,4,5], 2))
[[1, 2], [3, 4], [5]]
"""
def take(seq, n):
for i in xrange(n):
yield seq.next()
if not hasattr(seq, 'next'):
seq = iter(seq)
while True:
x = list(take(seq, size))
if x:
yield x
else:
break
def uniq(seq):
"""
Removes duplicate elements from a list.
>>> uniq([1,2,3,1,4,5,6])
[1, 2, 3, 4, 5, 6]
"""
seen = set()
result = []
for item in seq:
if item in seen: continue
seen.add(item)
result.append(item)
return result
def iterview(x):
"""
Takes an iterable `x` and returns an iterator over it
which prints its progress to stderr as it iterates through.
"""
WIDTH = 70
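    # Progress line layout: percentage and count, a bar padded to WIDTH, and an ETA,
    # e.g. " 42.0% ( 42/100) [=========>          ] ETA 00:01:23".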
def plainformat(n, lenx):
return '%5.1f%% (%*d/%d)' % ((float(n)/lenx)*100, len(str(lenx)), n, lenx)
def bars(size, n, lenx):
val = int((float(n)*size)/lenx + 0.5)
if size - val:
spacing = ">" + (" "*(size-val))[1:]
else:
spacing = ""
return "[%s%s]" % ("="*val, spacing)
def eta(elapsed, n, lenx):
if n == 0:
return '--:--:--'
if n == lenx:
secs = int(elapsed)
else:
secs = int((elapsed/n) * (lenx-n))
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hrs, mins, secs)
def format(starttime, n, lenx):
out = plainformat(n, lenx) + ' '
if n == lenx:
end = ' '
else:
end = ' ETA '
end += eta(time.time() - starttime, n, lenx)
out += bars(WIDTH - len(out) - len(end), n, lenx)
out += end
return out
starttime = time.time()
lenx = len(x)
for n, y in enumerate(x):
sys.stderr.write('\r' + format(starttime, n, lenx))
yield y
sys.stderr.write('\r' + format(starttime, n+1, lenx) + '\n')
class IterBetter:
"""
Returns an object that can be used as an iterator
but can also be used via __getitem__ (although it
cannot go backwards -- that is, you cannot request
`iterbetter[0]` after requesting `iterbetter[1]`).
>>> import itertools
>>> c = iterbetter(itertools.count())
>>> c[1]
1
>>> c[5]
5
>>> c[3]
Traceback (most recent call last):
...
IndexError: already passed 3
    For boolean test, IterBetter peeps at the first value in the iterator without affecting the iteration.
>>> c = iterbetter(iter(range(5)))
>>> bool(c)
True
>>> list(c)
[0, 1, 2, 3, 4]
>>> c = iterbetter(iter([]))
>>> bool(c)
False
>>> list(c)
[]
"""
def __init__(self, iterator):
self.i, self.c = iterator, 0
def __iter__(self):
if hasattr(self, "_head"):
yield self._head
while 1:
yield self.i.next()
self.c += 1
def __getitem__(self, i):
#todo: slices
if i < self.c:
raise IndexError, "already passed "+str(i)
try:
while i > self.c:
self.i.next()
self.c += 1
# now self.c == i
self.c += 1
return self.i.next()
except StopIteration:
raise IndexError, str(i)
def __nonzero__(self):
if hasattr(self, "__len__"):
return len(self) != 0
elif hasattr(self, "_head"):
return True
else:
try:
self._head = self.i.next()
except StopIteration:
return False
else:
return True
iterbetter = IterBetter
def safeiter(it, cleanup=None, ignore_errors=True):
"""Makes an iterator safe by ignoring the exceptions occured during the iteration.
"""
def next():
while True:
try:
return it.next()
except StopIteration:
raise
except:
traceback.print_exc()
it = iter(it)
while True:
yield next()
def safewrite(filename, content):
"""Writes the content to a temp file and then moves the temp file to
given filename to avoid overwriting the existing file in case of errors.
"""
f = file(filename + '.tmp', 'w')
f.write(content)
f.close()
    os.rename(f.name, filename)
def dictreverse(mapping):
"""
Returns a new dictionary with keys and values swapped.
>>> dictreverse({1: 2, 3: 4})
{2: 1, 4: 3}
"""
return dict([(value, key) for (key, value) in mapping.iteritems()])
def dictfind(dictionary, element):
"""
Returns a key whose value in `dictionary` is `element`
or, if none exists, None.
>>> d = {1:2, 3:4}
>>> dictfind(d, 4)
3
>>> dictfind(d, 5)
"""
for (key, value) in dictionary.iteritems():
if element is value:
return key
def dictfindall(dictionary, element):
"""
Returns the keys whose values in `dictionary` are `element`
or, if none exists, [].
>>> d = {1:4, 3:4}
>>> dictfindall(d, 4)
[1, 3]
>>> dictfindall(d, 5)
[]
"""
res = []
for (key, value) in dictionary.iteritems():
if element is value:
res.append(key)
return res
def dictincr(dictionary, element):
"""
Increments `element` in `dictionary`,
setting it to one if it doesn't exist.
>>> d = {1:2, 3:4}
>>> dictincr(d, 1)
3
>>> d[1]
3
>>> dictincr(d, 5)
1
>>> d[5]
1
"""
dictionary.setdefault(element, 0)
dictionary[element] += 1
return dictionary[element]
def dictadd(*dicts):
"""
Returns a dictionary consisting of the keys in the argument dictionaries.
If they share a key, the value from the last argument is used.
>>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
{1: 0, 2: 1, 3: 1}
"""
result = {}
for dct in dicts:
result.update(dct)
return result
def requeue(queue, index=-1):
"""Returns the element at index after moving it to the beginning of the queue.
>>> x = [1, 2, 3, 4]
>>> requeue(x)
4
>>> x
[4, 1, 2, 3]
"""
x = queue.pop(index)
queue.insert(0, x)
return x
def restack(stack, index=0):
"""Returns the element at index after moving it to the top of stack.
>>> x = [1, 2, 3, 4]
>>> restack(x)
1
>>> x
[2, 3, 4, 1]
"""
x = stack.pop(index)
stack.append(x)
return x
def listget(lst, ind, default=None):
"""
Returns `lst[ind]` if it exists, `default` otherwise.
>>> listget(['a'], 0)
'a'
>>> listget(['a'], 1)
>>> listget(['a'], 1, 'b')
'b'
"""
if len(lst)-1 < ind:
return default
return lst[ind]
def intget(integer, default=None):
"""
Returns `integer` as an int or `default` if it can't.
>>> intget('3')
3
>>> intget('3a')
>>> intget('3a', 0)
0
"""
try:
return int(integer)
except (TypeError, ValueError):
return default
def datestr(then, now=None):
"""
Converts a (UTC) datetime object to a nice string representation.
>>> from datetime import datetime, timedelta
>>> d = datetime(1970, 5, 1)
>>> datestr(d, now=d)
'0 microseconds ago'
>>> for t, v in {
... timedelta(microseconds=1): '1 microsecond ago',
... timedelta(microseconds=2): '2 microseconds ago',
... -timedelta(microseconds=1): '1 microsecond from now',
... -timedelta(microseconds=2): '2 microseconds from now',
... timedelta(microseconds=2000): '2 milliseconds ago',
... timedelta(seconds=2): '2 seconds ago',
... timedelta(seconds=2*60): '2 minutes ago',
... timedelta(seconds=2*60*60): '2 hours ago',
... timedelta(days=2): '2 days ago',
... }.iteritems():
... assert datestr(d, now=d+t) == v
>>> datestr(datetime(1970, 1, 1), now=d)
'January 1'
>>> datestr(datetime(1969, 1, 1), now=d)
'January 1, 1969'
>>> datestr(datetime(1970, 6, 1), now=d)
'June 1, 1970'
>>> datestr(None)
''
"""
def agohence(n, what, divisor=None):
if divisor: n = n // divisor
out = str(abs(n)) + ' ' + what # '2 day'
if abs(n) != 1: out += 's' # '2 days'
out += ' ' # '2 days '
if n < 0:
out += 'from now'
else:
out += 'ago'
return out # '2 days ago'
oneday = 24 * 60 * 60
if not then: return ""
if not now: now = datetime.datetime.utcnow()
if type(now).__name__ == "DateTime":
now = datetime.datetime.fromtimestamp(now)
if type(then).__name__ == "DateTime":
then = datetime.datetime.fromtimestamp(then)
elif type(then).__name__ == "date":
then = datetime.datetime(then.year, then.month, then.day)
delta = now - then
deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
deltadays = abs(deltaseconds) // oneday
if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor
if deltadays:
if abs(deltadays) < 4:
return agohence(deltadays, 'day')
out = then.strftime('%B %e') # e.g. 'June 13'
if then.year != now.year or deltadays < 0:
out += ', %s' % then.year
return out
if int(deltaseconds):
if abs(deltaseconds) > (60 * 60):
return agohence(deltaseconds, 'hour', 60 * 60)
elif abs(deltaseconds) > 60:
return agohence(deltaseconds, 'minute', 60)
else:
return agohence(deltaseconds, 'second')
deltamicroseconds = delta.microseconds
if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
if abs(deltamicroseconds) > 1000:
return agohence(deltamicroseconds, 'millisecond', 1000)
return agohence(deltamicroseconds, 'microsecond')
def numify(string):
"""
Removes all non-digit characters from `string`.
>>> numify('800-555-1212')
'8005551212'
>>> numify('800.555.1212')
'8005551212'
"""
return ''.join([c for c in str(string) if c.isdigit()])
def denumify(string, pattern):
"""
Formats `string` according to `pattern`, where the letter X gets replaced
by characters from `string`.
>>> denumify("8005551212", "(XXX) XXX-XXXX")
'(800) 555-1212'
"""
out = []
for c in pattern:
if c == "X":
out.append(string[0])
string = string[1:]
else:
out.append(c)
return ''.join(out)
def commify(n):
"""
Add commas to an integer `n`.
>>> commify(1)
'1'
>>> commify(123)
'123'
>>> commify(1234)
'1,234'
>>> commify(1234567890)
'1,234,567,890'
>>> commify(123.0)
'123.0'
>>> commify(1234.5)
'1,234.5'
>>> commify(1234.56789)
'1,234.56789'
>>> commify('%.2f' % 1234.5)
'1,234.50'
>>> commify(None)
>>>
"""
if n is None: return None
n = str(n)
if '.' in n:
dollars, cents = n.split('.')
else:
dollars, cents = n, None
r = []
for i, c in enumerate(str(dollars)[::-1]):
if i and (not (i % 3)):
r.insert(0, ',')
r.insert(0, c)
out = ''.join(r)
if cents:
out += '.' + cents
return out
def dateify(datestring):
"""
Formats a numified `datestring` properly.
"""
return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
"""
Formats an ordinal.
Doesn't handle negative numbers.
>>> nthstr(1)
'1st'
>>> nthstr(0)
'0th'
>>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
>>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
>>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
['111th', '112th', '113th', '114th', '115th']
"""
assert n >= 0
if n % 100 in [11, 12, 13]: return '%sth' % n
return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n
def cond(predicate, consequence, alternative=None):
"""
Function replacement for if-else to use in expressions.
>>> x = 2
>>> cond(x % 2 == 0, "even", "odd")
'even'
>>> cond(x % 2 == 0, "even", "odd") + '_row'
'even_row'
"""
if predicate:
return consequence
else:
return alternative
class CaptureStdout:
"""
Captures everything `func` prints to stdout and returns it instead.
>>> def idiot():
... print "foo"
>>> capturestdout(idiot)()
'foo\\n'
**WARNING:** Not threadsafe!
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **keywords):
from cStringIO import StringIO
# Not threadsafe!
out = StringIO()
oldstdout = sys.stdout
sys.stdout = out
try:
self.func(*args, **keywords)
finally:
sys.stdout = oldstdout
return out.getvalue()
capturestdout = CaptureStdout
class Profile:
"""
Profiles `func` and returns a tuple containing its output
and a string with human-readable profiling information.
>>> import time
>>> out, inf = profile(time.sleep)(.001)
>>> out
>>> inf[:10].strip()
'took 0.0'
"""
def __init__(self, func):
self.func = func
def __call__(self, *args): ##, **kw): kw unused
import hotshot, hotshot.stats, os, tempfile ##, time already imported
f, filename = tempfile.mkstemp()
os.close(f)
prof = hotshot.Profile(filename)
stime = time.time()
result = prof.runcall(self.func, *args)
stime = time.time() - stime
prof.close()
import cStringIO
out = cStringIO.StringIO()
stats = hotshot.stats.load(filename)
stats.stream = out
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(40)
stats.print_callers()
x = '\n\ntook '+ str(stime) + ' seconds\n'
x += out.getvalue()
# remove the tempfile
try:
os.remove(filename)
        except OSError:
pass
return result, x
profile = Profile
import traceback
# hack for compatibility with Python 2.3:
if not hasattr(traceback, 'format_exc'):
from cStringIO import StringIO
def format_exc(limit=None):
strbuf = StringIO()
traceback.print_exc(limit, strbuf)
return strbuf.getvalue()
traceback.format_exc = format_exc
def tryall(context, prefix=None):
"""
Tries a series of functions and prints their results.
`context` is a dictionary mapping names to values;
the value will only be tried if it's callable.
>>> tryall(dict(j=lambda: True))
j: True
----------------------------------------
results:
True: 1
For example, you might have a file `test/stuff.py`
with a series of functions testing various things in it.
At the bottom, have a line:
if __name__ == "__main__": tryall(globals())
Then you can run `python test/stuff.py` and get the results of
all the tests.
"""
context = context.copy() # vars() would update
results = {}
for (key, value) in context.iteritems():
if not hasattr(value, '__call__'):
continue
if prefix and not key.startswith(prefix):
continue
print key + ':',
try:
r = value()
dictincr(results, r)
print r
except:
print 'ERROR'
dictincr(results, 'ERROR')
print ' ' + '\n '.join(traceback.format_exc().split('\n'))
print '-'*40
print 'results:'
for (key, value) in results.iteritems():
print ' '*2, str(key)+':', value
class ThreadedDict:
"""
Thread local storage.
>>> d = ThreadedDict()
>>> d.x = 1
>>> d.x
1
>>> import threading
>>> def f(): d.x = 2
...
>>> t = threading.Thread(target=f)
>>> t.start()
>>> t.join()
>>> d.x
1
"""
def __getattr__(self, key):
return getattr(self._getd(), key)
def __setattr__(self, key, value):
return setattr(self._getd(), key, value)
def __delattr__(self, key):
return delattr(self._getd(), key)
def __hash__(self):
return id(self)
def _getd(self):
t = threading.currentThread()
if not hasattr(t, '_d'):
# using __dict__ of thread as thread local storage
t._d = {}
# there could be multiple instances of ThreadedDict.
# use self as key
if self not in t._d:
t._d[self] = storage()
return t._d[self]
threadeddict = ThreadedDict
def autoassign(self, locals):
"""
Automatically assigns local variables to `self`.
>>> self = storage()
>>> autoassign(self, dict(a=1, b=2))
>>> self
<Storage {'a': 1, 'b': 2}>
Generally used in `__init__` methods, as in:
def __init__(self, foo, bar, baz=1): autoassign(self, locals())
"""
for (key, value) in locals.iteritems():
if key == 'self':
continue
setattr(self, key, value)
def to36(q):
"""
Converts an integer to base 36 (a useful scheme for human-sayable IDs).
>>> to36(35)
'z'
>>> to36(119292)
'2k1o'
>>> int(to36(939387374), 36)
939387374
>>> to36(0)
'0'
>>> to36(-393)
Traceback (most recent call last):
...
ValueError: must supply a positive integer
"""
if q < 0: raise ValueError, "must supply a positive integer"
letters = "0123456789abcdefghijklmnopqrstuvwxyz"
converted = []
while q != 0:
q, r = divmod(q, 36)
converted.insert(0, letters[r])
return "".join(converted) or '0'
r_url = re_compile('(?<!\()(http://(\S+))')
def safemarkdown(text):
"""
Converts text to HTML following the rules of Markdown, but blocking any
outside HTML input, so that only the things supported by Markdown
can be used. Also converts raw URLs to links.
(requires [markdown.py](http://webpy.org/markdown.py))
"""
from markdown import markdown
if text:
        text = text.replace('<', '&lt;')
# TODO: automatically get page title?
text = r_url.sub(r'<\1>', text)
text = markdown(text)
return text
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
"""
Sends the email message `message` with mail and envelope headers
for from `from_address_` to `to_address` with `subject`.
Additional email headers can be specified with the dictionary
    `headers`.
Optionally cc, bcc and attachments can be specified as keyword arguments.
Attachments must be an iterable and each attachment can be either a
filename or a file object or a dictionary with filename, content and
optionally content_type keys.
If `web.config.smtp_server` is set, it will send the message
to that SMTP server. Otherwise it will look for
`/usr/sbin/sendmail`, the typical location for the sendmail-style
binary. To use sendmail from a different path, set `web.config.sendmail_path`.
"""
attachments = kw.pop("attachments", [])
mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)
for a in attachments:
if isinstance(a, dict):
mail.attach(a['filename'], a['content'], a.get('content_type'))
elif hasattr(a, 'read'): # file
filename = os.path.basename(getattr(a, "name", ""))
content_type = getattr(a, 'content_type', None)
mail.attach(filename, a.read(), content_type)
elif isinstance(a, basestring):
f = open(a, 'rb')
content = f.read()
f.close()
filename = os.path.basename(a)
mail.attach(filename, content, None)
else:
raise ValueError, "Invalid attachment: %s" % repr(a)
mail.send()
class _EmailMessage:
def __init__(self, from_address, to_address, subject, message, headers=None, **kw):
def listify(x):
if not isinstance(x, list):
return [safestr(x)]
else:
return [safestr(a) for a in x]
subject = safestr(subject)
message = safestr(message)
from_address = safestr(from_address)
to_address = listify(to_address)
cc = listify(kw.get('cc', []))
bcc = listify(kw.get('bcc', []))
recipients = to_address + cc + bcc
import email.Utils
self.from_address = email.Utils.parseaddr(from_address)[1]
self.recipients = [email.Utils.parseaddr(r)[1] for r in recipients]
self.headers = dictadd({
'From': from_address,
'To': ", ".join(to_address),
'Subject': subject
}, headers or {})
if cc:
self.headers['Cc'] = ", ".join(cc)
self.message = self.new_message()
self.message.add_header("Content-Transfer-Encoding", "7bit")
self.message.add_header("Content-Disposition", "inline")
self.message.add_header("MIME-Version", "1.0")
self.message.set_payload(message, 'utf-8')
self.multipart = False
def new_message(self):
from email.Message import Message
return Message()
def attach(self, filename, content, content_type=None):
if not self.multipart:
msg = self.new_message()
msg.add_header("Content-Type", "multipart/mixed")
msg.attach(self.message)
self.message = msg
self.multipart = True
import mimetypes
try:
from email import encoders
except:
from email import Encoders as encoders
        content_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"
msg = self.new_message()
msg.set_payload(content)
msg.add_header('Content-Type', content_type)
msg.add_header('Content-Disposition', 'attachment', filename=filename)
if not content_type.startswith("text/"):
encoders.encode_base64(msg)
self.message.attach(msg)
def send(self):
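        # Prefer a configured SMTP server (web.config.smtp_server); otherwise
        # fall back to piping the message to the local sendmail binary.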
try:
import webapi
except ImportError:
webapi = Storage(config=Storage())
for k, v in self.headers.iteritems():
self.message.add_header(k, v)
message_text = self.message.as_string()
if webapi.config.get('smtp_server'):
server = webapi.config.get('smtp_server')
port = webapi.config.get('smtp_port', 0)
username = webapi.config.get('smtp_username')
password = webapi.config.get('smtp_password')
debug_level = webapi.config.get('smtp_debuglevel', None)
starttls = webapi.config.get('smtp_starttls', False)
import smtplib
smtpserver = smtplib.SMTP(server, port)
if debug_level:
smtpserver.set_debuglevel(debug_level)
if starttls:
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
if username and password:
smtpserver.login(username, password)
smtpserver.sendmail(self.from_address, self.recipients, message_text)
smtpserver.quit()
else:
sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')
assert not self.from_address.startswith('-'), 'security'
for r in self.recipients:
assert not r.startswith('-'), 'security'
cmd = [sendmail, '-f', self.from_address] + self.recipients
if subprocess:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
p.stdin.write(message_text)
p.stdin.close()
p.wait()
else:
i, o = os.popen2(cmd)
                i.write(message_text)
i.close()
o.close()
del i, o
def __repr__(self):
return "<EmailMessage>"
def __str__(self):
return self.message.as_string()
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 | -1,845,256,980,992,193,000 | 27.096639 | 101 | 0.513534 | false |
ciena/afkak | afkak/test/test_protocol.py | 1 | 6085 | # -*- coding: utf-8 -*-
# Copyright 2015 Cyan, Inc.
# Copyright 2018 Ciena Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from unittest import mock
from twisted.internet.address import IPv4Address
from twisted.internet.error import ConnectionDone, ConnectionLost
from twisted.logger import LogLevel, globalLogPublisher
from twisted.python.failure import Failure
from twisted.test.iosim import FakeTransport
from twisted.test.proto_helpers import (
EventLoggingObserver, StringTransportWithDisconnection,
)
from twisted.trial.unittest import SynchronousTestCase
from .._protocol import KafkaBootstrapProtocol, KafkaProtocol
from .logtools import capture_logging
class TheFactory(object):
"""
`TheFactory` implements the bits of `_KafkaBrokerClient` that
`_KafkaProtocol` interacts with.
"""
log = logging.getLogger(__name__).getChild('TheFactory')
def handleResponse(self, string):
"""Called for each response."""
def _connectionLost(self, reason):
"""Called when the connection goes down."""
class KafkaProtocolTests(unittest.TestCase):
"""Test `afkak._protocol.KafkaProtocol`"""
def test_stringReceived(self):
"""
The factory is notified of message receipt.
"""
kp = KafkaProtocol()
kp.factory = factory_spy = mock.Mock(wraps=TheFactory())
kp.stringReceived(b"testing")
factory_spy.handleResponse.assert_called_once_with(b"testing")
def test_connectionLost_cleanly(self):
"""
The factory is notified of connection loss.
"""
kp = KafkaProtocol()
kp.factory = factory_spy = mock.Mock(wraps=TheFactory())
reason = Failure(ConnectionDone())
kp.connectionLost(reason)
factory_spy._connectionLost.assert_called_once_with(reason)
self.assertIsNone(kp.factory)
def test_lengthLimitExceeded(self):
"""
An error is logged and the connection dropped when an oversized message
is received.
"""
too_long = KafkaProtocol.MAX_LENGTH + 1
peer = IPv4Address('TCP', '1.2.3.4', 1234)
kp = KafkaProtocol()
kp.factory = factory_spy = mock.Mock(wraps=TheFactory())
kp.transport = StringTransportWithDisconnection(peerAddress=peer)
kp.transport.protocol = kp
with capture_logging(logging.getLogger('afkak.protocol')) as records:
kp.lengthLimitExceeded(too_long)
self.assertEqual(1, len(factory_spy._connectionLost.mock_calls))
[record] = records
record.getMessage() # Formats okay.
self.assertEqual((
'Broker at %s sent a %d byte message, exceeding the size limit of %d. '
'Terminating connection.'
), record.msg)
self.assertEqual((peer, too_long, kp.MAX_LENGTH), record.args)
class KafkaBootstrapProtocolTests(SynchronousTestCase):
"""Test `afkak._protocol.KafkaBootstrapProtocol`
:ivar peer: Peer IAddress, an IPv4 address
:ivar protocol: `KafkaBootstrapProtocol` object, connected in setUp
:ivar transport: `FakeTransport` object associated with the protocol,
connected in setUp
"""
def setUp(self):
self.peer = IPv4Address('TCP', 'kafka', 9072)
self.protocol = KafkaBootstrapProtocol()
self.transport = FakeTransport(self.protocol, isServer=False, peerAddress=self.peer)
self.protocol.makeConnection(self.transport)
def test_one_request(self):
"""
Happy path: a request is made and a response received.
"""
correlation_id = b'corr'
client_request = b'api ' + correlation_id + b'corr more stuff'
d = self.protocol.request(client_request)
self.assertNoResult(d)
# The request was written to the server.
server_request = self.transport.getOutBuffer()
self.assertEqual(b'\0\0\0\x17' + client_request, server_request)
self.transport.bufferReceived(b'\0\0\0\x05' + correlation_id + b'y')
self.assertEqual(correlation_id + b'y', self.successResultOf(d))
def test_disconnected(self):
"""
Pending and future requests fail when the connection goes away.
"""
d = self.protocol.request(b'api corr stuff')
self.assertNoResult(d)
self.transport.disconnectReason = ConnectionLost('Bye.')
self.transport.reportDisconnect()
self.failureResultOf(d, ConnectionLost)
self.failureResultOf(self.protocol.request(b'api corr more'), ConnectionLost)
def test_unknown_correlation_id(self):
"""
A warning is logged and the connection dropped when a response with an
unknown correlation ID is received.
"""
events = EventLoggingObserver.createWithCleanup(self, globalLogPublisher)
self.transport.bufferReceived(b'\0\0\0\x101234 more stuff..')
self.assertTrue(self.transport.disconnecting)
[event] = events
self.assertEqual(LogLevel.warn, event['log_level'])
self.assertEqual(self.peer, event['peer'])
self.assertEqual(b'1234', event['correlation_id'])
def test_oversized_response(self):
"""
An oversized response from the server prompts disconnection.
"""
d = self.protocol.request(b'api corr blah blah')
self.transport.bufferReceived(b'\xff\xff\xff\xff') # 2**32 - 1, way too large.
self.assertTrue(self.transport.disconnecting)
self.assertNoResult(d) # Will fail when the disconnect completes.
| apache-2.0 | -532,360,292,095,834,600 | 35.005917 | 92 | 0.677732 | false |
megarcia/GT16_JGRA | source/process_NCEI_01.py | 2 | 6625 | """
Python script 'process_NCEI_01.py'
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
[email protected]
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to [email protected]
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Extract daily station data from cleaned NOAA/NCEI dataset
DEPENDENCIES: h5py, numpy, pandas
USAGE: '$ python process_NCEI_01.py NCEI_WLS_20000101-20101231 ./data'
INPUT: '.csv' and '.h5' output from process_NCEI_00.py
OUTPUT: Updated '.h5' file with sorted meteorological data (no new files)
"""
import sys
import datetime
import h5py as hdf
import numpy as np
import pandas as pd
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
message(' ')
message('process_NCEI_01.py started at %s' %
datetime.datetime.now().isoformat())
message(' ')
#
if len(sys.argv) < 3:
message('input warning: no data directory path indicated, using ./data')
path = './data'
else:
path = sys.argv[2]
#
if len(sys.argv) < 2:
message('input error: need prefix for files containing NCEI weather data')
sys.exit(1)
else:
NCEIfname = sys.argv[1]
datafile = '%s/%s_cleaned.csv' % (path, NCEIfname)
h5fname = '%s/%s_processed.h5' % (path, NCEIfname)
#
message('reading station and date information from %s' % h5fname)
with hdf.File(h5fname, 'r') as h5infile:
stn_id = np.copy(h5infile['stn_id'])
dates = np.copy(h5infile['dates'])
message('- identifiers for %d stations found' % len(stn_id))
message('- meteorological data for %d dates found' % len(dates))
message(' ')
#
message('loading weather observation information from %s' % datafile)
stndata_df = pd.read_csv(datafile, low_memory=False)
ndatarows, ndatacols = np.shape(stndata_df)
message('- read %d total data rows with %d columns' % (ndatarows, ndatacols))
stndata_df = stndata_df.drop(['Unnamed: 0', 'IDX'], axis=1)
message('- dropped index columns')
#
# sanity (data integrity) check
metvals = ['STATION', 'STATION_NAME', 'ELEVATION', 'LATITUDE', 'LONGITUDE',
'DATE', 'PRCP', 'PRCP_M_FLAG', 'PRCP_Q_FLAG', 'TMAX', 'TMAX_M_FLAG',
'TMAX_Q_FLAG', 'TMIN', 'TMIN_M_FLAG', 'TMIN_Q_FLAG']
idxs = list(stndata_df.columns.values)
if idxs == metvals:
stndata_df = stndata_df.drop(['PRCP_M_FLAG', 'PRCP_Q_FLAG',
'TMAX_M_FLAG', 'TMAX_Q_FLAG',
'TMIN_M_FLAG', 'TMIN_Q_FLAG'], axis=1)
message('- dropped data flag columns')
else:
message('input error: cleaned NCEI weather data file does not have the \
expected fields')
message(' expected %s' % str(metvals))
message(' but found %s' % str(idxs))
sys.exit(1)
message(' ')
#
# sort dataset by date and station, and process met values by date
stndata_df = stndata_df.sort_values(by=['DATE', 'STATION'])
for date in dates:
message('gathering met data for %d' % date)
date_df = stndata_df[stndata_df['DATE'] == date]
nr, nc = np.shape(date_df)
message('- found %d total rows' % nr)
#
prcp_valid_df = date_df[date_df['PRCP'] != -9999]
prcp_stns = list(prcp_valid_df['STATION'])
prcp_lat = np.array(prcp_valid_df['LATITUDE'])
prcp_lon = np.array(prcp_valid_df['LONGITUDE'])
# convert PRCP from 0.1mm to cm
prcp_vals = np.array(prcp_valid_df['PRCP']) / 100.0
message('-- %d stns with PRCP data (mean %.1f stdev %.1f min %.1f \
max %.1f)' % (len(prcp_stns), np.mean(prcp_vals),
np.std(prcp_vals), np.min(prcp_vals),
np.max(prcp_vals)))
#
tmax_valid_df = date_df[date_df['TMAX'] != -9999]
tmax_stns = list(tmax_valid_df['STATION'])
tmax_lat = np.array(tmax_valid_df['LATITUDE'])
tmax_lon = np.array(tmax_valid_df['LONGITUDE'])
# convert TMAX from 0.1dC to dC
tmax_vals = np.array(tmax_valid_df['TMAX']) / 10.0
message('-- %d stns with TMAX data (mean %.1f stdev %.1f min %.1f \
max %.1f)' % (len(tmax_stns), np.mean(tmax_vals),
np.std(tmax_vals), np.min(tmax_vals),
np.max(tmax_vals)))
#
tmin_valid_df = date_df[date_df['TMIN'] != -9999]
tmin_stns = list(tmin_valid_df['STATION'])
tmin_lat = np.array(tmin_valid_df['LATITUDE'])
tmin_lon = np.array(tmin_valid_df['LONGITUDE'])
# convert TMIN from 0.1dC to dC
tmin_vals = np.array(tmin_valid_df['TMIN']) / 10.0
message('-- %d stns with TMIN data (mean %.1f stdev %.1f min %.1f \
max %.1f)' % (len(tmin_stns), np.mean(tmin_vals),
np.std(tmin_vals), np.min(tmin_vals),
np.max(tmin_vals)))
#
message('- saving %d met data to %s' % (date, h5fname))
with hdf.File(h5fname, 'r+') as h5file:
if 'last_updated' in h5file['meta'].keys():
del h5file['meta/last_updated']
h5file.create_dataset('meta/last_updated',
data=datetime.datetime.now().isoformat())
if 'at' in h5file['meta'].keys():
del h5file['meta/at']
h5file.create_dataset('meta/at', data=date)
datepath = 'metdata/%d' % date
if 'metdata' in h5file.keys():
if date in h5file['metdata'].keys():
del h5file[datepath]
h5file.create_dataset(datepath + '/prcp_stns', data=prcp_stns)
h5file.create_dataset(datepath + '/prcp_lat', data=prcp_lat)
h5file.create_dataset(datepath + '/prcp_lon', data=prcp_lon)
h5file.create_dataset(datepath + '/prcp_vals', data=prcp_vals)
h5file.create_dataset(datepath + '/tmax_stns', data=tmax_stns)
h5file.create_dataset(datepath + '/tmax_lat', data=tmax_lat)
h5file.create_dataset(datepath + '/tmax_lon', data=tmax_lon)
h5file.create_dataset(datepath + '/tmax_vals', data=tmax_vals)
h5file.create_dataset(datepath + '/tmin_stns', data=tmin_stns)
h5file.create_dataset(datepath + '/tmin_lat', data=tmin_lat)
h5file.create_dataset(datepath + '/tmin_lon', data=tmin_lon)
h5file.create_dataset(datepath + '/tmin_vals', data=tmin_vals)
message(' ')
#
message('process_NCEI_01.py completed at %s' %
datetime.datetime.now().isoformat())
message(' ')
sys.exit(0)
# end process_NCEI_01.py
| gpl-3.0 | -8,631,419,957,783,528,000 | 38.670659 | 79 | 0.621736 | false |
rbaindourov/v8-inspector | Source/chrome/tools/cygprofile/check_orderfile_unittest.py | 49 | 1769 | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import check_orderfile
import symbol_extractor
class TestCheckOrderFile(unittest.TestCase):
_SYMBOL_INFOS = [symbol_extractor.SymbolInfo('first', 0x1, 0, ''),
symbol_extractor.SymbolInfo('second', 0x2, 0, ''),
symbol_extractor.SymbolInfo('notProfiled', 0x4, 0, ''),
symbol_extractor.SymbolInfo('third', 0x3, 0, ''),]
def testMatchesSymbols(self):
symbols = ['first', 'second', 'third']
(misordered_pairs_count, matched_count, missing_count) = (
check_orderfile._CountMisorderedSymbols(symbols, self._SYMBOL_INFOS))
self.assertEquals(
(misordered_pairs_count, matched_count, missing_count), (0, 3, 0))
def testMissingMatches(self):
symbols = ['second', 'third', 'other', 'first']
(_, matched_count, unmatched_count) = (
check_orderfile._CountMisorderedSymbols(symbols, self._SYMBOL_INFOS))
self.assertEquals(matched_count, 3)
self.assertEquals(unmatched_count, 1)
def testNoUnorderedSymbols(self):
symbols = ['first', 'other', 'second', 'third', 'noMatchEither']
(misordered_pairs_count, _, _) = (
check_orderfile._CountMisorderedSymbols(symbols, self._SYMBOL_INFOS))
self.assertEquals(misordered_pairs_count, 0)
def testUnorderedSymbols(self):
symbols = ['first', 'other', 'third', 'second', 'noMatchEither']
(misordered_pairs_count, _, _) = (
check_orderfile._CountMisorderedSymbols(symbols, self._SYMBOL_INFOS))
self.assertEquals(misordered_pairs_count, 1)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 6,746,830,056,616,794,000 | 37.456522 | 77 | 0.66987 | false |
ChantyTaguan/zds-site | zds/member/urls.py | 1 | 2339 | # coding: utf-8
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', 'zds.member.views.index'),
url(r'^voir/(?P<user_name>.+)/$',
'zds.member.views.details'),
url(r'^profil/modifier/(?P<user_pk>\d+)/$',
'zds.member.views.modify_profile'),
url(r'^profil/lier/$',
'zds.member.views.add_oldtuto'),
url(r'^profil/delier/$',
'zds.member.views.remove_oldtuto'),
url(r'^tutoriels/$',
'zds.member.views.tutorials'),
url(r'^articles/$',
'zds.member.views.articles'),
url(r'^actions/$',
'zds.member.views.actions'),
url(r'^parametres/profil/$',
'zds.member.views.settings_profile'),
url(r'^parametres/profil/maj_avatar/$',
'zds.member.views.update_avatar'),
url(r'^parametres/mini_profil/(?P<user_name>.+)/$',
'zds.member.views.settings_mini_profile'),
url(r'^parametres/compte/$',
'zds.member.views.settings_account'),
url(r'^parametres/user/$',
'zds.member.views.settings_user'),
url(r'^connexion/$',
'zds.member.views.login_view'),
url(r'^deconnexion/$',
'zds.member.views.logout_view'),
url(r'^inscription/$',
'zds.member.views.register_view'),
url(r'^reinitialisation/$',
'zds.member.views.forgot_password'),
url(r'^new_password/$',
'zds.member.views.new_password'),
url(r'^activation/$',
'zds.member.views.active_account'),
url(r'^envoi_jeton/$',
'zds.member.views.generate_token_account'),
) | gpl-3.0 | -7,926,029,447,301,832,000 | 44.882353 | 74 | 0.398461 | false |
tchx84/sugar | src/jarabe/model/telepathyclient.py | 12 | 4950 | # Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import dbus
from dbus import PROPERTIES_IFACE
from telepathy.interfaces import CLIENT, \
CHANNEL, \
CHANNEL_TYPE_TEXT, \
CLIENT_APPROVER, \
CLIENT_HANDLER, \
CLIENT_INTERFACE_REQUESTS
from telepathy.server import DBusProperties
from telepathy.constants import CONNECTION_HANDLE_TYPE_ROOM
from telepathy.constants import CONNECTION_HANDLE_TYPE_CONTACT
from sugar3 import dispatch
SUGAR_CLIENT_SERVICE = 'org.freedesktop.Telepathy.Client.Sugar'
SUGAR_CLIENT_PATH = '/org/freedesktop/Telepathy/Client/Sugar'
_instance = None
class TelepathyClient(dbus.service.Object, DBusProperties):
def __init__(self):
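        # Register as a Telepathy Client (Handler + Approver) on the session
        # bus under the well-known Sugar client name and object path.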
self._interfaces = set([CLIENT, CLIENT_HANDLER,
CLIENT_INTERFACE_REQUESTS, PROPERTIES_IFACE,
CLIENT_APPROVER])
bus = dbus.Bus()
bus_name = dbus.service.BusName(SUGAR_CLIENT_SERVICE, bus=bus)
dbus.service.Object.__init__(self, bus_name, SUGAR_CLIENT_PATH)
DBusProperties.__init__(self)
self._implement_property_get(CLIENT, {
'Interfaces': lambda: list(self._interfaces),
})
self._implement_property_get(CLIENT_HANDLER, {
'HandlerChannelFilter': self.__get_filters_handler_cb,
})
self._implement_property_get(CLIENT_APPROVER, {
'ApproverChannelFilter': self.__get_filters_approver_cb,
})
self.got_channel = dispatch.Signal()
self.got_dispatch_operation = dispatch.Signal()
def __get_filters_handler_cb(self):
filter_dict = dbus.Dictionary({}, signature='sv')
return dbus.Array([filter_dict], signature='a{sv}')
def __get_filters_approver_cb(self):
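        # Approve two kinds of incoming text channels: activity invitations
        # (text channels targeting a room) and one-to-one chats (text
        # channels targeting a single contact).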
activity_invitation = {
CHANNEL + '.ChannelType': CHANNEL_TYPE_TEXT,
CHANNEL + '.TargetHandleType': CONNECTION_HANDLE_TYPE_ROOM,
}
filter_dict = dbus.Dictionary(activity_invitation, signature='sv')
filters = dbus.Array([filter_dict], signature='a{sv}')
text_invitation = {
CHANNEL + '.ChannelType': CHANNEL_TYPE_TEXT,
CHANNEL + '.TargetHandleType': CONNECTION_HANDLE_TYPE_CONTACT,
}
filter_dict = dbus.Dictionary(text_invitation, signature='sv')
filters.append(filter_dict)
logging.debug('__get_filters_approver_cb %r', filters)
return filters
@dbus.service.method(dbus_interface=CLIENT_HANDLER,
in_signature='ooa(oa{sv})aota{sv}', out_signature='')
def HandleChannels(self, account, connection, channels, requests_satisfied,
user_action_time, handler_info):
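        # Telepathy hands over the channels this client is registered to
        # handle; fan each one out to listeners via the got_channel signal.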
logging.debug('HandleChannels\n%r\n%r\n%r\n%r\n%r\n%r\n', account,
connection, channels, requests_satisfied,
user_action_time, handler_info)
for channel in channels:
self.got_channel.send(self, account=account,
connection=connection, channel=channel)
@dbus.service.method(dbus_interface=CLIENT_INTERFACE_REQUESTS,
in_signature='oa{sv}', out_signature='')
def AddRequest(self, request, properties):
logging.debug('AddRequest\n%r\n%r', request, properties)
@dbus.service.method(dbus_interface=CLIENT_APPROVER,
in_signature='a(oa{sv})oa{sv}', out_signature='',
async_callbacks=('success_cb', 'error_cb_'))
def AddDispatchOperation(self, channels, dispatch_operation_path,
properties, success_cb, error_cb_):
success_cb()
try:
logging.debug('AddDispatchOperation\n%r\n%r\n%r', channels,
dispatch_operation_path, properties)
self.got_dispatch_operation.send(
self,
channels=channels,
dispatch_operation_path=dispatch_operation_path,
properties=properties)
except Exception, e:
logging.exception(e)
def get_instance():
global _instance
if not _instance:
_instance = TelepathyClient()
return _instance
| gpl-2.0 | -8,686,728,246,314,015,000 | 37.671875 | 79 | 0.636768 | false |
jbalogh/zamboni | apps/market/models.py | 1 | 7069 | # -*- coding: utf-8 -*-
import time
from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.utils import translation
from django.utils.http import urlencode
from translations.fields import TranslatedField
import amo
import amo.models
from amo.urlresolvers import reverse
from stats.models import Contribution
from users.models import UserProfile
from babel import Locale, numbers
import commonware.log
from jinja2.filters import do_dictsort
import jwt
import paypal
log = commonware.log.getLogger('z.market')
class PriceManager(amo.models.ManagerBase):
def active(self):
return self.filter(active=True)
class Price(amo.models.ModelBase):
active = models.BooleanField(default=True)
name = TranslatedField()
price = models.DecimalField(max_digits=5, decimal_places=2)
objects = PriceManager()
currency = 'USD'
class Meta:
db_table = 'prices'
def __unicode__(self):
return u'%s - $%s' % (self.name, self.price)
def _price(self):
"""Return the price and currency for the current locale."""
lang = translation.get_language()
locale = Locale(translation.to_locale(lang))
currency = amo.LOCALE_CURRENCY.get(locale.language)
if currency:
price_currency = self.pricecurrency_set.filter(currency=currency)
if price_currency:
return price_currency[0].price, currency, locale
return self.price, self.currency, locale
def get_price(self):
"""Return the price as a decimal for the current locale."""
return self._price()[0]
def get_price_locale(self):
"""Return the price as a nicely localised string for the locale."""
price, currency, locale = self._price()
return numbers.format_currency(price, currency, locale=locale)
class PriceCurrency(amo.models.ModelBase):
currency = models.CharField(max_length=10,
choices=do_dictsort(amo.OTHER_CURRENCIES))
price = models.DecimalField(max_digits=5, decimal_places=2)
tier = models.ForeignKey(Price)
class Meta:
db_table = 'price_currency'
verbose_name = 'Price currencies'
def __unicode__(self):
return u'%s, %s: %s' % (self.tier, self.currency, self.price)
class AddonPurchase(amo.models.ModelBase):
addon = models.ForeignKey('addons.Addon')
user = models.ForeignKey(UserProfile)
receipt = models.TextField(default='')
class Meta:
db_table = 'addon_purchase'
def __unicode__(self):
return u'%s: %s' % (self.addon, self.user)
def create_receipt(self):
verify = reverse('api.market.verify', args=[self.addon.pk])
hsh = self.addon.get_watermark_hash(self.user)
url = urlencode({amo.WATERMARK_KEY: self.user.email,
amo.WATERMARK_KEY_HASH: hsh})
verify = '%s?%s' % (verify, url)
receipt = dict(typ='purchase-receipt',
product=self.addon.origin,
user={'type': 'email',
'value': self.user.email},
iss=settings.SITE_URL,
nbf=time.time(),
iat=time.time(),
detail=reverse('users.purchases.receipt',
args=[self.addon.pk]),
verify=verify)
self.receipt = jwt.encode(receipt, get_key())
def get_key():
"""Return a key for using with encode."""
return jwt.rsa_load(settings.WEBAPPS_RECEIPT_KEY)
@receiver(models.signals.post_save, sender=AddonPurchase,
dispatch_uid='create_receipt')
def create_receipt(sender, instance, **kw):
"""
When the AddonPurchase gets created, see if we need to create a receipt.
"""
if (kw.get('raw') or instance.addon.type != amo.ADDON_WEBAPP
or instance.receipt):
return
log.debug('Creating receipt for: addon %s, user %s'
% (instance.addon.pk, instance.user.pk))
instance.create_receipt()
instance.save()
@receiver(models.signals.post_save, sender=Contribution,
dispatch_uid='create_addon_purchase')
def create_addon_purchase(sender, instance, **kw):
"""
When the contribution table is updated with the data from PayPal,
update the addon purchase table. Will figure out if we need to add to or
delete from the AddonPurchase table.
"""
if (kw.get('raw') or
instance.type not in [amo.CONTRIB_PURCHASE, amo.CONTRIB_REFUND,
amo.CONTRIB_CHARGEBACK]):
# Whitelist the types we care about. Forget about the rest.
return
log.debug('Processing addon purchase type: %s, addon %s, user %s'
% (amo.CONTRIB_TYPES[instance.type], instance.addon.pk,
instance.user.pk))
if instance.type == amo.CONTRIB_PURCHASE:
log.debug('Creating addon purchase: addon %s, user %s'
% (instance.addon.pk, instance.user.pk))
AddonPurchase.objects.get_or_create(addon=instance.addon,
user=instance.user)
elif instance.type in [amo.CONTRIB_REFUND, amo.CONTRIB_CHARGEBACK]:
purchases = AddonPurchase.objects.filter(addon=instance.addon,
user=instance.user)
for p in purchases:
log.debug('Deleting addon purchase: %s, addon %s, user %s'
% (p.pk, instance.addon.pk, instance.user.pk))
p.delete()
class AddonPremium(amo.models.ModelBase):
"""Additions to the Addon model that only apply to Premium add-ons."""
addon = models.OneToOneField('addons.Addon')
price = models.ForeignKey(Price, blank=True, null=True)
paypal_permissions_token = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'addons_premium'
def __unicode__(self):
return u'Premium %s: %s' % (self.addon, self.price)
def get_price(self):
return self.price.get_price()
def get_price_locale(self):
return self.price.get_price_locale()
def is_complete(self):
return bool(self.addon and self.price and
self.addon.paypal_id and self.addon.support_email)
def has_permissions_token(self):
"""
Have we got a permissions token. If you've got 'should_ignore_paypal'
enabled, then it will just happily return True.
"""
return bool(paypal.should_ignore_paypal() or
self.paypal_permissions_token)
def has_valid_permissions_token(self):
"""
        Have we got a valid permissions token by pinging PayPal. If you've got
'should_ignore_paypal', then it will just happily return True.
"""
if paypal.should_ignore_paypal():
return True
if not self.paypal_permissions_token:
return False
return paypal.check_refund_permission(self.paypal_permissions_token)
| bsd-3-clause | 4,980,549,534,146,986,000 | 33.315534 | 77 | 0.621021 | false |
shurihell/testasia | lms/djangoapps/certificates/tests/test_views.py | 11 | 15721 | """Tests for certificates views. """
import json
import ddt
from uuid import uuid4
from nose.plugins.attrib import attr
from mock import patch
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
from openedx.core.lib.tests.assertions.events import assert_event_matches
from student.tests.factories import UserFactory
from track.tests import EventTrackingTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from util.testing import UrlResetMixin
from certificates.api import get_certificate_url
from certificates.models import (
ExampleCertificateSet,
ExampleCertificate,
GeneratedCertificate,
CertificateHtmlViewConfiguration,
)
from certificates.tests.factories import (
BadgeAssertionFactory,
)
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
FEATURES_WITH_CERTS_DISABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_DISABLED['CERTIFICATES_HTML_VIEW'] = False
FEATURES_WITH_CUSTOM_CERTS_ENABLED = {
"CUSTOM_CERTIFICATE_TEMPLATES_ENABLED": True
}
FEATURES_WITH_CUSTOM_CERTS_ENABLED.update(FEATURES_WITH_CERTS_ENABLED)
@attr('shard_1')
@ddt.ddt
class UpdateExampleCertificateViewTest(TestCase):
"""Tests for the XQueue callback that updates example certificates. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
DESCRIPTION = 'test'
TEMPLATE = 'test.pdf'
DOWNLOAD_URL = 'http://www.example.com'
ERROR_REASON = 'Kaboom!'
def setUp(self):
super(UpdateExampleCertificateViewTest, self).setUp()
self.cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
self.cert = ExampleCertificate.objects.create(
example_cert_set=self.cert_set,
description=self.DESCRIPTION,
template=self.TEMPLATE,
)
self.url = reverse('certificates.views.update_example_certificate')
# Since rate limit counts are cached, we need to clear
# this before each test.
cache.clear()
def test_update_example_certificate_success(self):
response = self._post_to_view(self.cert, download_url=self.DOWNLOAD_URL)
self._assert_response(response)
self.cert = ExampleCertificate.objects.get()
self.assertEqual(self.cert.status, ExampleCertificate.STATUS_SUCCESS)
self.assertEqual(self.cert.download_url, self.DOWNLOAD_URL)
def test_update_example_certificate_invalid_key(self):
payload = {
'xqueue_header': json.dumps({
'lms_key': 'invalid'
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid,
'url': self.DOWNLOAD_URL
})
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 404)
def test_update_example_certificate_error(self):
response = self._post_to_view(self.cert, error_reason=self.ERROR_REASON)
self._assert_response(response)
self.cert = ExampleCertificate.objects.get()
self.assertEqual(self.cert.status, ExampleCertificate.STATUS_ERROR)
self.assertEqual(self.cert.error_reason, self.ERROR_REASON)
@ddt.data('xqueue_header', 'xqueue_body')
def test_update_example_certificate_invalid_params(self, missing_param):
payload = {
'xqueue_header': json.dumps({
'lms_key': self.cert.access_key
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid,
'url': self.DOWNLOAD_URL
})
}
del payload[missing_param]
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 400)
def test_update_example_certificate_missing_download_url(self):
payload = {
'xqueue_header': json.dumps({
'lms_key': self.cert.access_key
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid
})
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 400)
    def test_update_example_certificate_non_json_param(self):
payload = {
'xqueue_header': '{/invalid',
'xqueue_body': '{/invalid'
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 400)
def test_unsupported_http_method(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 405)
def test_bad_request_rate_limiting(self):
payload = {
'xqueue_header': json.dumps({
'lms_key': 'invalid'
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid,
'url': self.DOWNLOAD_URL
})
}
# Exceed the rate limit for invalid requests
# (simulate a DDOS with invalid keys)
for _ in range(100):
response = self.client.post(self.url, data=payload)
if response.status_code == 403:
break
# The final status code should indicate that the rate
# limit was exceeded.
self.assertEqual(response.status_code, 403)
def _post_to_view(self, cert, download_url=None, error_reason=None):
"""Simulate a callback from the XQueue to the example certificate end-point. """
header = {'lms_key': cert.access_key}
body = {'username': cert.uuid}
if download_url is not None:
body['url'] = download_url
if error_reason is not None:
body['error'] = 'error'
body['error_reason'] = self.ERROR_REASON
payload = {
'xqueue_header': json.dumps(header),
'xqueue_body': json.dumps(body)
}
return self.client.post(self.url, data=payload)
def _assert_response(self, response):
"""Check the response from the callback end-point. """
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(content['return_code'], 0)
@attr('shard_1')
class MicrositeCertificatesViewsTests(ModuleStoreTestCase):
"""
Tests for the microsite certificates web/html views
"""
def setUp(self):
super(MicrositeCertificatesViewsTests, self).setUp()
self.client = Client()
self.course = CourseFactory.create(
org='testorg', number='run1', display_name='refundable course'
)
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
self.course_id = self.course.location.course_key
self.user = UserFactory.create(
email='[email protected]',
username='joeuser',
password='foo'
)
self.user.profile.name = "Joe User"
self.user.profile.save()
self.client.login(username=self.user.username, password='foo')
self.cert = GeneratedCertificate.objects.create(
user=self.user,
course_id=self.course_id,
download_uuid=uuid4(),
grade="0.95",
key='the_key',
distinction=True,
status='downloadable',
mode='honor',
name=self.user.profile.name,
)
def _certificate_html_view_configuration(self, configuration_string, enabled=True):
"""
This will create a certificate html configuration
"""
config = CertificateHtmlViewConfiguration(enabled=enabled, configuration=configuration_string)
config.save()
return config
def _add_course_certificates(self, count=1, signatory_count=0, is_active=True):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Signatory_Name ' + str(i),
'title': 'Signatory_Title ' + str(i),
'organization': 'Signatory_Organization ' + str(i),
'signature_image_path': '/static/certificates/images/demo-sig{}.png'.format(i),
'id': i,
} for i in xrange(signatory_count)
]
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'course_title': 'course_title_' + str(i),
'signatories': signatories,
'version': 1,
'is_active': is_active
} for i in xrange(count)
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_view_for_microsite(self):
test_configuration_string = """{
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "edX",
"company_about_url": "http://www.edx.org/about-us",
"company_privacy_url": "http://www.edx.org/edx-privacy-policy",
"company_tos_url": "http://www.edx.org/edx-terms-service",
"company_verified_certificate_url": "http://www.edx.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-edx.svg",
"logo_url": "http://www.edx.org"
},
"microsites": {
"testmicrosite": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "platform_microsite",
"company_about_url": "http://www.microsite.org/about-us",
"company_privacy_url": "http://www.microsite.org/edx-privacy-policy",
"company_tos_url": "http://www.microsite.org/microsite-terms-service",
"company_verified_certificate_url": "http://www.microsite.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-microsite.svg",
"logo_url": "http://www.microsite.org",
"company_about_description": "This is special microsite aware company_about_description content",
"company_about_title": "Microsite title"
}
},
"honor": {
"certificate_type": "Honor Code"
}
}"""
config = self._certificate_html_view_configuration(configuration_string=test_configuration_string)
self.assertEquals(config.configuration, test_configuration_string)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=unicode(self.course.id)
)
self._add_course_certificates(count=1, signatory_count=2)
response = self.client.get(test_url, HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertIn('platform_microsite', response.content)
self.assertIn('http://www.microsite.org', response.content)
self.assertIn('This is special microsite aware company_about_description content', response.content)
self.assertIn('Microsite title', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_view_microsite_configuration_missing(self):
test_configuration_string = """{
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "edX",
"company_about_url": "http://www.edx.org/about-us",
"company_privacy_url": "http://www.edx.org/edx-privacy-policy",
"company_tos_url": "http://www.edx.org/edx-terms-service",
"company_verified_certificate_url": "http://www.edx.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-edx.svg",
"logo_url": "http://www.edx.org",
"company_about_description": "This should not survive being overwritten by static content"
},
"honor": {
"certificate_type": "Honor Code"
}
}"""
config = self._certificate_html_view_configuration(configuration_string=test_configuration_string)
self.assertEquals(config.configuration, test_configuration_string)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=unicode(self.course.id)
)
self._add_course_certificates(count=1, signatory_count=2)
response = self.client.get(test_url, HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertIn('edX', response.content)
self.assertNotIn('platform_microsite', response.content)
self.assertNotIn('http://www.microsite.org', response.content)
self.assertNotIn('This should not survive being overwritten by static content', response.content)
class TrackShareRedirectTest(UrlResetMixin, ModuleStoreTestCase, EventTrackingTestCase):
"""
Verifies the badge image share event is sent out.
"""
@patch.dict(settings.FEATURES, {"ENABLE_OPENBADGES": True})
def setUp(self):
super(TrackShareRedirectTest, self).setUp('certificates.urls')
self.client = Client()
self.course = CourseFactory.create(
org='testorg', number='run1', display_name='trackable course'
)
self.assertion = BadgeAssertionFactory(
user=self.user, course_id=self.course.id, data={
'image': 'http://www.example.com/image.png',
'json': {'id': 'http://www.example.com/assertion.json'},
'issuer': 'http://www.example.com/issuer.json'
},
)
def test_social_event_sent(self):
test_url = '/certificates/badge_share_tracker/{}/social_network/{}/'.format(
unicode(self.course.id),
self.user.username,
)
self.recreate_tracker()
response = self.client.get(test_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://www.example.com/image.png')
assert_event_matches(
{
'name': 'edx.badge.assertion.shared',
'data': {
'course_id': 'testorg/run1/trackable_course',
'social_network': 'social_network',
# pylint: disable=no-member
'assertion_id': self.assertion.id,
'assertion_json_url': 'http://www.example.com/assertion.json',
'assertion_image_url': 'http://www.example.com/image.png',
'user_id': self.user.id,
'issuer': 'http://www.example.com/issuer.json',
'enrollment_mode': 'honor'
},
},
self.get_event()
)
| agpl-3.0 | -8,328,641,508,367,647,000 | 39.310256 | 117 | 0.603842 | false |
ergoregion/Rota-Program | Rota_System/UI/Roles/widget_role.py | 1 | 2190 | __author__ = 'Neil Butcher'
import sys
from PyQt4 import QtCore, QtGui
from Rota_System.UI.Roles.commands_role import CommandChangeRole
class SingleRoleWidget(QtGui.QWidget):
commandIssued = QtCore.pyqtSignal(QtGui.QUndoCommand)
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
self._role = None
self.layout = QtGui.QHBoxLayout(self)
self.descriptionBox = QtGui.QLineEdit(self)
self.descriptionBox.editingFinished.connect(self.description_enter)
self.layout.addWidget(self.descriptionBox)
self.priorityBox = QtGui.QComboBox(self)
self.priorityBox.setMaxVisibleItems(10)
self.priorityBox.addItems(
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
)
self.priorityBox.activated.connect(self.priority_entered)
self.layout.addWidget(self.priorityBox)
def role(self, role):
if self._role:
self._role.descriptionChanged.disconnect(self.update)
self._role.priorityChanged.disconnect(self.update)
self._role = role
role.descriptionChanged.connect(self.update)
role.priorityChanged.connect(self.update)
self.update()
def update(self):
self.priorityBox.setCurrentIndex(self._role.priority)
self.descriptionBox.setText(self._role.description)
def description_entered(self, string):
if self._role:
command = CommandChangeRole(self._role, 'description', str(string))
self.commandIssued.emit(command)
def description_enter(self):
if self._role:
string = self.descriptionBox.text()
if not string == self._role.description:
command = CommandChangeRole(self._role, 'description', str(string))
self.commandIssued.emit(command)
def priority_entered(self, integer):
if self._role:
command = CommandChangeRole(self._role, 'priority', integer)
self.commandIssued.emit(command)
def main():
app = QtGui.QApplication(sys.argv)
w = SingleRoleWidget(None)
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() | mit | -201,966,647,692,224,100 | 31.220588 | 83 | 0.638356 | false |
citrix-openstack-build/oslo.reports | oslo_reports/views/text/threading.py | 3 | 2424 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides thread and stack-trace views
This module provides a collection of views for
visualizing threads, green threads, and stack traces
in human-readable form.
"""
from oslo_reports.views import jinja_view as jv
class StackTraceView(jv.JinjaView):
"""A Stack Trace View
This view displays stack trace models defined by
:class:`oslo_reports.models.threading.StackTraceModel`
"""
VIEW_TEXT = (
"{% if root_exception is not none %}"
"Exception: {{ root_exception }}\n"
"------------------------------------\n"
"\n"
"{% endif %}"
"{% for line in lines %}\n"
"{{ line.filename }}:{{ line.line }} in {{ line.name }}\n"
" {% if line.code is not none %}"
"`{{ line.code }}`"
"{% else %}"
"(source not found)"
"{% endif %}\n"
"{% else %}\n"
"No Traceback!\n"
"{% endfor %}"
)
class GreenThreadView(object):
"""A Green Thread View
This view displays a green thread provided by the data
model :class:`oslo_reports.models.threading.GreenThreadModel`
"""
FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}"
def __call__(self, model):
return self.FORMAT_STR.format(
thread_str=" Green Thread ",
stack_trace=model.stack_trace
)
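# Illustrative usage sketch (an assumption, not part of oslo.reports): the
# views in this module only read attributes off the model object they are
# called with, so any object exposing ``stack_trace`` (plus ``thread_id`` for
# ThreadView below) renders like the real model classes named in the
# docstrings, e.g.:
#
#     class _FakeGreenThread(object):
#         stack_trace = '<stack trace text>'
#
#     print(GreenThreadView()(_FakeGreenThread()))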
class ThreadView(object):
"""A Thread Collection View
This view displays a python thread provided by the data
model :class:`oslo_reports.models.threading.ThreadModel` # noqa
"""
FORMAT_STR = "------{thread_str: ^60}------" + "\n" + "{stack_trace}"
def __call__(self, model):
return self.FORMAT_STR.format(
thread_str=" Thread #{0} ".format(model.thread_id),
stack_trace=model.stack_trace
)
| apache-2.0 | -9,103,680,364,283,972,000 | 29.3 | 78 | 0.59901 | false |
Python-Tools/pmfp | pmfp/scripts/_new_cmd.py | 1 | 2358 | import argparse
import json
from pmfp.new import new, new_component, new_pb
from pmfp.config import load_rc
def new_cmd(args: argparse.Namespace):
config = load_rc()
if config is False:
kwargs_o = _parser_args(args)
c_language = kwargs_o.get("language").capitalize()
spl_name = kwargs_o.get("component_name").split("-")
if len(spl_name) == 1:
print(spl_name)
if spl_name[0] in ("pb", "grpc", "grpc-streaming"):
if kwargs_o["rename"] == "-":
rename = "example"
elif kwargs_o["rename"] == "":
rename = "example_pb"
else:
rename = kwargs_o["rename"]
print(spl_name)
print(rename)
print(kwargs_o.get("to", "pbschema"))
new_pb(c_name=spl_name[0], rename=rename, to=kwargs_o.get("to", "pbschema"), project_name="example")
else:
c_category = spl_name[0]
c_name = "".join(spl_name[1:])
path = f"{c_language}/{c_category}/{c_name}"
config = {
"project-language": c_language,
"project-name": "tempname"
}
to = "." if kwargs_o.get("to") == "-" else kwargs_o.get("to")
new_component(
config,
path,
to,
kwargs_o.get("rename"),
kwargs_o.get("test"),
**kwargs_o.get("kwargs", {})
)
else:
kwargs_o = _parser_args(args)
new(config, kwargs_o)
def _parser_args(args: argparse.Namespace):
result = {
"component_name": None,
'to': "-",
'rename': "-",
"language": "-",
"test": False,
"kwargs": {}
}
if args.component_name:
result["component_name"] = args.component_name
if args.to:
result['to'] = args.to
if args.rename:
result['rename'] = args.rename
if args.language:
result['language'] = args.language
if args.test:
result['test'] = args.test
if args.kwargs:
print(args.kwargs)
try:
result['kwargs'] = json.loads(args.kwargs)
except Exception as e:
print("关键字kwargs无法解析为json形式")
return result
| mit | -1,921,588,049,715,524,900 | 31.027397 | 116 | 0.485458 | false |
effigies/mne-python | mne/forward/tests/test_forward.py | 1 | 13130 | import os
import os.path as op
import warnings
import gc
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne.io import Raw
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution)
from mne import SourceEstimate, pick_types_forward, read_evokeds
from mne.label import read_label
from mne.utils import requires_mne, run_subprocess, _TempDir, run_tests_if_main
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward)
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
fname_mri = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
def compare_forwards(f1, f2):
"""Helper to compare two potentially converted forward solutions"""
assert_allclose(f1['sol']['data'], f2['sol']['data'])
assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
assert_allclose(f1['source_nn'], f2['source_nn'])
if f1['sol_grad'] is not None:
assert_true(f2['sol_grad'] is not None)
assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])
else:
assert_true(f2['sol_grad'] is None)
assert_equal(f1['source_ori'], f2['source_ori'])
assert_equal(f1['surf_ori'], f2['surf_ori'])
@testing.requires_testing_data
def test_convert_forward():
"""Test converting forward solution between different representations
"""
fwd = read_forward_solution(fname_meeg_grad)
assert_true(repr(fwd))
assert_true(isinstance(fwd, Forward))
# look at surface orientation
fwd_surf = convert_forward_solution(fwd, surf_ori=True)
fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
compare_forwards(fwd_surf, fwd_surf_io)
del fwd_surf_io
gc.collect()
# go back
fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
assert_true(repr(fwd_new))
assert_true(isinstance(fwd_new, Forward))
compare_forwards(fwd, fwd_new)
# now go to fixed
fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
force_fixed=True)
del fwd_surf
gc.collect()
assert_true(repr(fwd_fixed))
assert_true(isinstance(fwd_fixed, Forward))
fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
force_fixed=True)
compare_forwards(fwd_fixed, fwd_fixed_io)
del fwd_fixed_io
gc.collect()
# now go back to cartesian (original condition)
fwd_new = convert_forward_solution(fwd_fixed)
assert_true(repr(fwd_new))
assert_true(isinstance(fwd_new, Forward))
compare_forwards(fwd, fwd_new)
del fwd, fwd_new, fwd_fixed
gc.collect()
@testing.requires_testing_data
def test_io_forward():
"""Test IO for forward solutions
"""
temp_dir = _TempDir()
# do extensive tests with MEEG + grad
n_channels, n_src = 366, 108
fwd = read_forward_solution(fname_meeg_grad)
assert_true(isinstance(fwd, Forward))
fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd['sol']['row_names']), n_channels)
fname_temp = op.join(temp_dir, 'test-fwd.fif')
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
fwd_read = read_forward_solution(fname_temp, surf_ori=True)
leadfield = fwd_read['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd_read['sol']['row_names']), n_channels)
assert_equal(len(fwd_read['info']['chs']), n_channels)
assert_true('dev_head_t' in fwd_read['info'])
assert_true('mri_head_t' in fwd_read)
assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src / 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert_true('dev_head_t' in fwd['info'])
assert_true('mri_head_t' in fwd)
assert_true(fwd['surf_ori'])
# test warnings on bad filenames
fwd = read_forward_solution(fname_meeg_grad)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
write_forward_solution(fwd_badname, fwd)
read_forward_solution(fwd_badname)
assert_true(len(w) == 2)
fwd = read_forward_solution(fname_meeg)
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
compare_forwards(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
"""Test projection of source space data to sensor space
"""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
assert_true(isinstance(fwd, Forward))
vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
gain_sum = np.sum(fwd['sol']['data'], axis=1)
# Evoked
with warnings.catch_warnings(record=True) as w:
evoked = read_evokeds(fname_evoked, condition=0)
evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)
assert_equal(len(w), 2)
data = evoked.data
times = evoked.times
# do some tests
assert_array_almost_equal(evoked.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
assert_array_almost_equal(times[0], t_start)
assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
# Raw
raw = Raw(fname_raw)
raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)
data, times = raw_proj[:, :]
# do some tests
assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
assert_array_almost_equal(times[0], t_start)
assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
@testing.requires_testing_data
def test_restrict_forward_to_stc():
"""Test restriction of source space to source SourceEstimate
"""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_true(isinstance(fwd_out, Forward))
assert_equal(fwd_out['sol']['ncol'], 20)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
fwd = read_forward_solution(fname_meeg, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_equal(fwd_out['sol']['ncol'], 60)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
@testing.requires_testing_data
def test_restrict_forward_to_label():
"""Test restriction of source space to label
"""
fwd = read_forward_solution(fname_meeg, force_fixed=True)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
+ len(fwd['src'][0]['vertno']))
assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
fwd = read_forward_solution(fname_meeg, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
+ len(fwd['src'][0]['vertno']))
assert_equal(fwd_out['sol']['ncol'],
3 * (len(src_sel_lh) + len(src_sel_rh)))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)
assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution():
"""Test averaging forward solutions
"""
temp_dir = _TempDir()
fwd = read_forward_solution(fname_meeg)
# input not a list
assert_raises(TypeError, average_forward_solutions, 1)
# list is too short
assert_raises(ValueError, average_forward_solutions, [])
# negative weights
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
# all zero weights
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
# weights not same length
assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
# list does not only have all dict()
assert_raises(TypeError, average_forward_solutions, [1, fwd])
# try an easy case
fwd_copy = average_forward_solutions([fwd])
assert_true(isinstance(fwd_copy, Forward))
assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
# modify a fwd solution, save it, use MNE to average with old one
fwd_copy['sol']['data'] *= 0.5
fname_copy = op.join(temp_dir, 'copy-fwd.fif')
write_forward_solution(fname_copy, fwd_copy, overwrite=True)
cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
fname_copy, '--out', fname_copy)
run_subprocess(cmd)
# now let's actually do it, with one filename and one fwd
fwd_ave = average_forward_solutions([fwd, fwd_copy])
assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
# fwd_ave_mne = read_forward_solution(fname_copy)
# assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
# with gradient
fwd = read_forward_solution(fname_meeg_grad)
fwd_ave = average_forward_solutions([fwd, fwd])
compare_forwards(fwd, fwd_ave)
run_tests_if_main()
| bsd-3-clause | 870,074,566,463,039,500 | 39.152905 | 79 | 0.630084 | false |
CYBAI/servo | tests/wpt/web-platform-tests/css/css-fonts/support/fonts/makegsubfonts.py | 30 | 14166 | from __future__ import print_function
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print("Making lookup type 1 font...")
makeLookup1()
print("Making lookup type 3 font...")
makeLookup3()
# output javascript data
print("Making javascript data file...")
makeJavascriptData()
| mpl-2.0 | 8,045,101,316,494,257,000 | 28.088296 | 135 | 0.649795 | false |
c7zero/chipsec | tests/hardware/test_generic.py | 6 | 2384 | #!/usr/bin/python
#CHIPSEC: Platform Security Assessment Framework
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
import os
import platform
import tempfile
import unittest
import chipsec_main
from chipsec import logger
class GenericHardwareTest(unittest.TestCase):
BOOT_MODE_LEGACY = 1
BOOT_MODE_UEFI = 2
def setUp(self):
if hasattr(self, "SYSTEM") and platform.system() != self.SYSTEM:
self.skipTest("Unsupported system {}".format(self.SYSTEM))
if hasattr(self, "DIST") and platform.dist() != self.DIST:
self.skipTest("Unsupported distribution {}".format(self.DIST))
if (hasattr(self, "PRODUCT_NAME") and
self.product_name() != self.PRODUCT_NAME):
self.skipTest("Unsupported platform {}".format(self.PRODUCT_NAME))
if (hasattr(self, "BIOS_VERSION") and
self.bios_version() != self.BIOS_VERSION):
self.skipTest("Unsupported BIOS version "
"{}".format(self.BIOS_VERSION))
if hasattr(self, "BOOT_MODE") and self.boot_mode() != self.BOOT_MODE:
self.skipTest("Unsupported boot type {}".format(self.BOOT_MODE))
_, self.log_file = tempfile.mkstemp()
def tearDown(self):
os.remove(self.log_file)
def _generic_main(self):
cm = chipsec_main.ChipsecMain(["-l", self.log_file])
error_code = cm.main()
logger.logger().close()
self.log = open(self.log_file).read()
self.assertLessEqual(error_code, 31,
"At least one test raised an error")
for test in self.PASS:
self.assertIn("PASSED: {}".format(test), self.log)
for test in self.SKIPPED:
self.assertIn("SKIPPED: {}".format(test), self.log)
| gpl-2.0 | -7,955,920,053,545,327,000 | 35.676923 | 80 | 0.652265 | false |
Vagab0nd/SiCKRAGE | lib3/sqlalchemy/dialects/sqlite/pysqlcipher.py | 4 | 4692 | # sqlite/pysqlcipher.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
:url: https://pypi.python.org/pypi/pysqlcipher
``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
``pysqlcipher3`` is a fork of ``pysqlcipher`` for Python 3. This dialect
will attempt to import it if ``pysqlcipher`` is non-present.
.. versionadded:: 1.1.4 - added fallback import for pysqlcipher3
.. versionadded:: 0.9.9 - added pysqlcipher dialect
Driver
------
The driver here is the
`pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_
driver, which makes use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allows the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.
`pysqlcipher3` is a fork of `pysqlcipher` with support for Python 3,
the driver is the same.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`_sa.create_engine.poolclass` parameter; the :class:`.StaticPool`
may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
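For example, to opt out of pooling by passing :class:`.NullPool` explicitly
(shown here purely to illustrate the :paramref:`_sa.create_engine.poolclass`
parameter; any of the pool classes above may be substituted)::
    from sqlalchemy.pool import NullPool
    e = create_engine('sqlite+pysqlcipher://:testing@/foo.db',
                      poolclass=NullPool)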
""" # noqa
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ...engine import url as _url
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
driver = "pysqlcipher"
pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
@classmethod
def dbapi(cls):
try:
from pysqlcipher import dbapi2 as sqlcipher
except ImportError as e:
try:
from pysqlcipher3 import dbapi2 as sqlcipher
except ImportError:
raise e
return sqlcipher
@classmethod
def get_pool_class(cls, url):
return pool.SingletonThreadPool
def connect(self, *cargs, **cparams):
passphrase = cparams.pop("passphrase", "")
pragmas = dict((key, cparams.pop(key, None)) for key in self.pragmas)
conn = super(SQLiteDialect_pysqlcipher, self).connect(
*cargs, **cparams
)
conn.execute('pragma key="%s"' % passphrase)
for prag, value in pragmas.items():
if value is not None:
conn.execute('pragma %s="%s"' % (prag, value))
return conn
def create_connect_args(self, url):
super_url = _url.URL(
url.drivername,
username=url.username,
host=url.host,
database=url.database,
query=url.query,
)
c_args, opts = super(
SQLiteDialect_pysqlcipher, self
).create_connect_args(super_url)
opts["passphrase"] = url.password
return c_args, opts
dialect = SQLiteDialect_pysqlcipher
| gpl-3.0 | 2,216,975,753,478,482,200 | 33 | 96 | 0.692882 | false |
rwl/PyCIM | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/VoltageCompensator/__init__.py | 1 | 1504 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Dynamics.VoltageCompensator.VoltageCompensatorVcompCross import VoltageCompensatorVcompCross
from CIM14.ENTSOE.Dynamics.IEC61970.Dynamics.VoltageCompensator.VoltageCompensatorVcompIEEE import VoltageCompensatorVcompIEEE
nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#VoltageCompensator"
nsPrefix = "cimVoltageCompensator"
| mit | 110,888,129,193,843,500 | 54.703704 | 128 | 0.803191 | false |
feend78/evennia | evennia/scripts/migrations/0009_scriptdb_db_account.py | 2 | 1174 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-05 17:27
from __future__ import unicode_literals
from django.db import migrations, models, connection
import django.db.models.deletion
def _table_exists(db_cursor, tablename):
"Returns bool if table exists or not"
return tablename in connection.introspection.table_names()
class Migration(migrations.Migration):
dependencies = [
('accounts', '0007_copy_player_to_account'),
('scripts', '0008_auto_20170606_1731'),
]
db_cursor = connection.cursor()
operations = []
if _table_exists(db_cursor, "players_playerdb"):
# OBS - this is run BEFORE migrations even start, so if we have a player table
# here we are not starting from scratch.
operations = [
migrations.AddField(
model_name='scriptdb',
name='db_account',
field=models.ForeignKey(blank=True, help_text=b'the account to store this script on (should not be set if db_obj is set)', null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.AccountDB', verbose_name=b'scripted account'),
),
]
| bsd-3-clause | -1,930,541,814,525,931,500 | 35.6875 | 254 | 0.650767 | false |
ekasitk/sahara | sahara/service/edp/job_manager.py | 1 | 7750 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.spark import engine as spark_engine
from sahara.service.edp.storm import engine as storm_engine
from sahara.utils import edp
from sahara.utils import proxy as p
LOG = log.getLogger(__name__)
CONF = cfg.CONF
conductor = c.API
ENGINES = [oozie_engine.OozieJobEngine,
spark_engine.SparkJobEngine,
storm_engine.StormJobEngine]
def _get_job_type(job_execution):
return conductor.job_get(context.ctx(), job_execution.job_id).type
def _get_job_engine(cluster, job_execution):
return job_utils.get_plugin(cluster).get_edp_engine(cluster,
_get_job_type(
job_execution))
def _write_job_status(job_execution, job_info):
update = {"info": job_info}
if job_info['status'] in edp.JOB_STATUSES_TERMINATED:
update['end_time'] = datetime.datetime.now()
job_configs = p.delete_proxy_user_for_job_execution(job_execution)
if job_configs:
update['job_configs'] = job_configs
return conductor.job_execution_update(context.ctx(),
job_execution,
update)
def _update_job_status(engine, job_execution):
job_info = engine.get_job_status(job_execution)
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
return job_execution
def _update_job_execution_extra(cluster, job_execution):
if ((CONF.use_namespaces and not CONF.use_floating_ips) or
CONF.proxy_command):
info = cluster.node_groups[0].instances[0].remote().get_neutron_info()
extra = job_execution.extra.copy()
extra['neutron'] = info
job_execution = conductor.job_execution_update(
context.ctx(), job_execution.id, {'extra': extra})
return job_execution
def _run_job(job_execution_id):
ctx = context.ctx()
job_execution = conductor.job_execution_get(ctx, job_execution_id)
cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
if cluster.status != 'Active':
return
eng = _get_job_engine(cluster, job_execution)
if eng is None:
raise e.EDPError(_("Cluster does not support job type %s")
% _get_job_type(job_execution))
job_execution = _update_job_execution_extra(cluster, job_execution)
# Job id is a string
# Status is a string
# Extra is a dictionary to add to extra in the job_execution
jid, status, extra = eng.run_job(job_execution)
# Set the job id and the start time
# Optionally, update the status and the 'extra' field
update_dict = {'oozie_job_id': jid,
'start_time': datetime.datetime.now()}
if status:
update_dict['info'] = {'status': status}
if extra:
curr_extra = job_execution.extra.copy()
curr_extra.update(extra)
update_dict['extra'] = curr_extra
job_execution = conductor.job_execution_update(
ctx, job_execution, update_dict)
def run_job(job_execution_id):
try:
_run_job(job_execution_id)
except Exception as ex:
LOG.warning(
_LW("Can't run job execution (reason: {reason})").format(
reason=ex))
cancel_job(job_execution_id)
conductor.job_execution_update(
context.ctx(), job_execution_id,
{'info': {'status': edp.JOB_STATUS_FAILED},
'start_time': datetime.datetime.now(),
'end_time': datetime.datetime.now()})
def cancel_job(job_execution_id):
ctx = context.ctx()
job_execution = conductor.job_execution_get(ctx, job_execution_id)
if job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED:
return job_execution
cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
if cluster is None:
return job_execution
engine = _get_job_engine(cluster, job_execution)
if engine is not None:
job_execution = conductor.job_execution_update(
ctx, job_execution_id,
{'info': {'status': edp.JOB_STATUS_TOBEKILLED}})
timeout = CONF.job_canceling_timeout
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
if job_execution.info['status'] not in edp.JOB_STATUSES_TERMINATED:
try:
job_info = engine.cancel_job(job_execution)
except Exception as ex:
job_info = None
LOG.warning(
_LW("Error during cancel of job execution: "
"{error}").format(error=ex))
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
LOG.info(_LI("Job execution was canceled successfully"))
return job_execution
context.sleep(3)
job_execution = conductor.job_execution_get(
ctx, job_execution_id)
if not job_execution:
LOG.info(_LI("Job execution was deleted. "
"Canceling current operation."))
return job_execution
else:
LOG.info(_LI("Job execution status: {status}").format(
status=job_execution.info['status']))
return job_execution
else:
raise e.CancelingFailed(_('Job execution %s was not canceled')
% job_execution.id)
def get_job_status(job_execution_id):
ctx = context.ctx()
job_execution = conductor.job_execution_get(ctx, job_execution_id)
cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
if cluster is not None and cluster.status == 'Active':
engine = _get_job_engine(cluster, job_execution)
if engine is not None:
job_execution = _update_job_status(engine,
job_execution)
return job_execution
def update_job_statuses(cluster_id=None):
ctx = context.ctx()
kwargs = {'end_time': None}
if cluster_id:
kwargs.update({'cluster_id': cluster_id})
for je in conductor.job_execution_get_all(ctx, **kwargs):
try:
get_job_status(je.id)
except Exception as e:
LOG.error(_LE("Error during update job execution {job}: {error}")
.format(job=je.id, error=e))
def get_job_config_hints(job_type):
for eng in ENGINES:
if job_type in eng.get_supported_job_types():
return eng.get_possible_job_config(job_type)
| apache-2.0 | 1,219,834,692,894,380,300 | 35.904762 | 79 | 0.612258 | false |
nunpa/configuration | playbooks/library/ec2_ami_2_0_0_1.py | 6 | 14232 | #!/usr/bin/env python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2
description:
- Creates or deletes ec2 images.
options:
instance_id:
description:
- instance id of the image to create
required: false
default: null
name:
description:
- The name of the new image to create
required: false
default: null
wait:
description:
- wait for the AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
state:
description:
- create or deregister/delete image
required: false
default: 'present'
description:
description:
- An optional human-readable string describing the contents and purpose of the AMI.
required: false
default: null
no_reboot:
description:
- An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. The default choice is "no".
required: false
default: no
choices: [ "yes", "no" ]
image_id:
description:
- Image ID to be deregistered.
required: false
default: null
device_mapping:
version_added: "2.0"
description:
- An optional list of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)"
required: false
default: null
delete_snapshot:
description:
- Whether or not to delete an AMI while deregistering it.
required: false
default: null
tags:
description:
- a hash/dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
version_added: "2.0"
launch_permissions:
description:
- Users and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently.
required: false
default: null
version_added: "2.0"
author: "Evan Duffield (@scicoin-project) <[email protected]>"
extends_documentation_fragment:
- aws
- ec2
'''
# Thank you to iAcquire for sponsoring development of this module.
EXAMPLES = '''
# Basic AMI Creation
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
wait: yes
name: newtest
tags:
Name: newtest
Service: TestService
register: instance
# Basic AMI Creation, without waiting
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
instance_id: i-xxxxxx
wait: no
name: newtest
register: instance
# AMI Creation, with a custom root-device size and another EBS attached
- ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
size: YYY
delete_on_termination: false
volume_type: gp2
register: instance
# AMI Creation, excluding a volume attached at /dev/sdb
- ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
no_device: yes
register: instance
# Deregister/Delete AMI
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: True
state: absent
# Deregister AMI
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: False
state: absent
# Update AMI Launch Permissions, making it public
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']
# Allow AMI to be launched by another account
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
'''
import sys
import time
try:
import boto
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_image(module, ec2):
"""
Creates new AMI
module : AnsibleModule object
ec2: authenticated ec2 connection object
"""
instance_id = module.params.get('instance_id')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
description = module.params.get('description')
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
launch_permissions = module.params.get('launch_permissions')
try:
params = {'instance_id': instance_id,
'name': name,
'description': description,
'no_reboot': no_reboot}
if device_mapping:
bdm = BlockDeviceMapping()
for device in device_mapping:
if 'device_name' not in device:
module.fail_json(msg = 'Device name must be set for volume')
device_name = device['device_name']
del device['device_name']
bd = BlockDeviceType(**device)
bdm[device_name] = bd
params['block_device_mapping'] = bdm
image_id = ec2.create_image(**params)
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidAMIName.Duplicate':
images = ec2.get_all_images()
for img in images:
if img.name == name:
module.exit_json(msg="AMI name already present", image_id=img.id, state=img.state, changed=False)
else:
module.fail_json(msg="Error in retrieving duplicate AMI details")
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# Wait until the image is recognized. EC2 API has eventual consistency,
# such that a successful CreateImage API call doesn't guarantee the success
# of subsequent DescribeImages API call using the new image id returned.
for i in range(wait_timeout):
try:
img = ec2.get_image(image_id)
break
except boto.exception.EC2ResponseError, e:
if 'InvalidAMIID.NotFound' in e.error_code and wait:
time.sleep(1)
else:
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")
else:
module.fail_json(msg="timed out waiting for image to be recognized")
# wait here until the image is created
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and (img is None or img.state != 'available'):
img = ec2.get_image(image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "timed out waiting for image to be created")
if tags:
try:
ec2.create_tags(image_id, tags)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
if launch_permissions:
try:
img = ec2.get_image(image_id)
img.set_launch_permissions(**launch_permissions)
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)
module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)
def deregister_image(module, ec2):
"""
Deregisters AMI
"""
image_id = module.params.get('image_id')
delete_snapshot = module.params.get('delete_snapshot')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
img = ec2.get_image(image_id)
if img == None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)
try:
params = {'image_id': image_id,
'delete_snapshot': delete_snapshot}
res = ec2.deregister_image(**params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# wait here until the image is gone
img = ec2.get_image(image_id)
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and img is not None:
img = ec2.get_image(image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "timed out waiting for image to be reregistered/deleted")
module.exit_json(msg="AMI deregister/delete operation complete", changed=True)
def update_image(module, ec2):
"""
Updates AMI
"""
image_id = module.params.get('image_id')
launch_permissions = module.params.get('launch_permissions')
if 'user_ids' in launch_permissions:
launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']]
img = ec2.get_image(image_id)
if img == None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)
try:
set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions:
if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']):
res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(),
image_id = dict(),
delete_snapshot = dict(),
name = dict(),
wait = dict(type="bool", default=False),
wait_timeout = dict(default=900),
description = dict(default=""),
no_reboot = dict(default=False, type="bool"),
state = dict(default='present'),
device_mapping = dict(type='list'),
tags = dict(type='dict'),
launch_permissions = dict(type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
ec2 = ec2_connect(module)
except Exception, e:
module.fail_json(msg="Error while connecting to aws: %s" % str(e))
if module.params.get('state') == 'absent':
if not module.params.get('image_id'):
module.fail_json(msg='image_id needs to be an ami image to registered/delete')
deregister_image(module, ec2)
elif module.params.get('state') == 'present':
if module.params.get('image_id') and module.params.get('launch_permissions'):
# Update image's launch permissions
update_image(module, ec2)
# Changed is always set to true when provisioning new AMI
if not module.params.get('instance_id'):
module.fail_json(msg='instance_id parameter is required for new image')
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new image')
create_image(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| agpl-3.0 | -2,800,102,396,749,594,000 | 34.140741 | 266 | 0.651138 | false |
macarthur-lab/exac_readviz_scripts | test/test_check_gvcf.py | 1 | 3230 | import os
import vcf
import unittest
from utils.check_gvcf import convert_genotype_to_alleles, check_gvcf
class TestCheckGVCF(unittest.TestCase):
def setUp(self):
self.sample1_path = os.path.join(os.path.dirname(__file__), "data/sample1.gvcf.gz")
self.sample2a_path = os.path.join(os.path.dirname(__file__), "data/sample2a.gvcf.gz")
self.sample2b_path = os.path.join(os.path.dirname(__file__), "data/sample2b.gvcf.gz")
def test_convert_genotypes(self):
# test that all records in sample1.gvcf are converted correctly
sample1_file = vcf.Reader(filename=self.sample1_path)
actual = [convert_genotype_to_alleles(r) for r in sample1_file]
expected = ['G/G', 'C/C', 'T/T', 'T/T', 'C/C', 'T/T', 'T/T', 'A/A', 'T/T',
'A/A', 'A/A', 'C/C', 'T/T', 'CTTTTTATTTTTATTTTTA/CTTTTTATTTTTATTTTTA',
'A/C', 'TTTTTATTTTTATTTTTA/TTTTTATTTTTATTTTTA', 'A/C', 'T/T', 'A/C',
'T/T', 'A/AT', 'T/T', 'T/T', 'A/A', 'C/C', 'T/T', 'A/A', 'A/A', 'G/G',
'A/A', 'G/G', 'C/C', 'A/A', 'G/G', 'A/A']
self.assertListEqual(actual, expected)
# test that the 4 records in sample2a.gvcf are converted correctly
sample2a_file = vcf.Reader(filename=self.sample2a_path)
actual = [convert_genotype_to_alleles(r) for r in sample2a_file]
expected = ['G/G', 'G/G', 'T/T', 'T/A', 'T/G', 'C/C']
self.assertListEqual(actual, expected)
sample2b_file = vcf.Reader(filename=self.sample2b_path)
actual = [convert_genotype_to_alleles(r) for r in sample2b_file]
expected = ['G/G', 'G/G', 'G/G', 'G/G', 'T/T', 'G/G', 'G/G', 'G/G',
'G/G', 'T/T', 'A/A', 'A/A', 'G/G', 'C/C', 'T/T', 'C/C',
'T/T', 'T/A', 'G/G', 'C/G', 'C/G', 'T/T', 'C/C', 'C/C',
'C/C', 'C/C', 'G/G', 'G/G', 'A/A', 'T/T', 'G/G', 'G/G',
'C/C', 'C/C', 'G/G', 'G/G', 'C/C', 'T/T', 'A/A', 'T/T',
'G/G', 'G/G', 'G/G', 'G/G', 'T/T', 'T/T', 'T/T', 'A/A',
'T/T', 'G/G', 'G/G', 'C/C', 'G/G', 'G/G', 'C/C', 'G/G',
'G/G', 'T/T', 'T/T', 'C/C', 'C/C', 'A/A', 'C/C', 'G/G',
'C/C', 'C/C', 'T/T', 'T/T', 'G/G']
def test_check_gvcf(self):
# sanity check
for path in [self.sample1_path, self.sample2a_path, self.sample2b_path]:
for r in vcf.Reader(filename=path):
success, error_code, error_message = check_gvcf(path, path, r.CHROM, r.POS)
self.assertTrue(success, "check_gvcf against self failed at %(r)s in %(path)s: %(error_code)s %(error_message)s " % locals())
# check for matches
for r in vcf.Reader(filename=self.sample2b_path):
if r.CHROM != "2" or r.POS < 905393 or r.POS > 905494:
continue
success, error_code, error_message = check_gvcf(self.sample2a_path, self.sample2b_path, r.CHROM, r.POS)
#print(r.CHROM, r.POS, success, error_message)
if r.POS == 905394: self.assertTrue(success)
if r.POS == 905492: self.assertTrue(success)
if r.POS == 905493: self.assertFalse(success)
| mit | 2,087,855,657,812,924,000 | 50.269841 | 141 | 0.517647 | false |
beardypig/streamlink | tests/plugins/test_playtv.py | 7 | 1264 | import unittest
from streamlink.plugins.playtv import PlayTV
class TestPluginPlayTV(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(PlayTV.can_handle_url("http://playtv.fr/television/arte"))
self.assertTrue(PlayTV.can_handle_url("http://playtv.fr/television/arte/"))
self.assertTrue(PlayTV.can_handle_url("http://playtv.fr/television/tv5-monde"))
self.assertTrue(PlayTV.can_handle_url("http://playtv.fr/television/france-24-english/"))
self.assertTrue(PlayTV.can_handle_url("http://play.tv/live-tv/9/arte"))
self.assertTrue(PlayTV.can_handle_url("http://play.tv/live-tv/9/arte/"))
self.assertTrue(PlayTV.can_handle_url("http://play.tv/live-tv/21/tv5-monde"))
self.assertTrue(PlayTV.can_handle_url("http://play.tv/live-tv/50/france-24-english/"))
# shouldn't match
self.assertFalse(PlayTV.can_handle_url("http://playtv.fr/television/"))
self.assertFalse(PlayTV.can_handle_url("http://playtv.fr/replay-tv/"))
self.assertFalse(PlayTV.can_handle_url("http://play.tv/live-tv/"))
self.assertFalse(PlayTV.can_handle_url("http://tvcatchup.com/"))
self.assertFalse(PlayTV.can_handle_url("http://youtube.com/"))
| bsd-2-clause | -3,497,835,095,500,838,400 | 53.956522 | 96 | 0.685127 | false |
pazeshun/jsk_apc | demos/grasp_fusion/examples/grasp_fusion/affordance_segmentation/train.py | 2 | 8130 | #!/usr/bin/env python
import argparse
import datetime
import functools
import os
import os.path as osp
import socket
os.environ['MPLBACKEND'] = 'agg' # NOQA
import chainer
from chainer.training import extensions
import fcn
import grasp_fusion_lib
from grasp_fusion_lib.contrib import grasp_fusion
here = osp.dirname(osp.abspath(__file__))
def transform(in_data, model, train):
img, depth, lbl = in_data
# HWC -> CHW
img = img.transpose(2, 0, 1)
if lbl.ndim == 2:
lbl = lbl[:, :, None] # HW -> HW1
lbl = lbl.transpose(2, 0, 1)
if train:
imgs, depths = model.prepare([img], [depth])
img = imgs[0]
depth = depths[0]
C, H, W = img.shape
assert C == 3
assert lbl.shape == (lbl.shape[0], H, W)
return img, depth, lbl
def get_model_and_data(
affordance,
batch_size=1,
comm=None,
modal='rgb',
augmentation=True,
resolution=30,
):
if affordance == 'suction':
dataset_train = grasp_fusion.datasets.SuctionDataset(
'train', augmentation=augmentation,
)
dataset_test = grasp_fusion.datasets.SuctionDataset('test')
else:
assert affordance == 'pinch'
dataset_train = grasp_fusion.datasets.PinchDataset(
'train', augmentation=augmentation, resolution=resolution,
)
dataset_test = grasp_fusion.datasets.PinchDataset(
'test', resolution=resolution,
)
channel_names = dataset_train.channel_names
out_channels = len(channel_names)
predictor = grasp_fusion.models.FCN8sVGG16Sigmoid(
out_channels=out_channels, modal=modal,
)
model = grasp_fusion.models.FCNSigmoidTrainChain(predictor)
if comm:
import chainermn
if comm.rank != 0:
dataset_train = None
dataset_train = chainermn.scatter_dataset(
dataset_train, comm, shuffle=True
)
iter_train = chainer.iterators.SerialIterator(
chainer.datasets.TransformDataset(
dataset_train,
lambda x: transform(x, model=predictor, train=True),
),
batch_size=batch_size,
)
iter_test = chainer.iterators.SerialIterator(
chainer.datasets.TransformDataset(
dataset_test,
lambda x: transform(x, model=predictor, train=False),
),
batch_size=1,
repeat=False,
shuffle=False,
)
return model, iter_train, iter_test, channel_names
def get_trainer(
optimizer,
iter_train,
iter_test,
channel_names,
args,
comm=None,
device=None,
):
if device is None:
device = args.gpu
model = optimizer.target
converter = functools.partial(
chainer.dataset.concat_examples, padding=(0, 0, -1),
)
updater = chainer.training.StandardUpdater(
iter_train, optimizer, converter=converter, device=device,
)
trainer = chainer.training.Trainer(
updater, (args.max_epoch, 'epoch'), out=args.out)
if comm and comm.rank != 0:
return trainer
trainer.extend(fcn.extensions.ParamsReport(args.__dict__))
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(extensions.LogReport(
trigger=(args.interval_print, 'iteration')))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'elapsed_time',
'main/loss', 'validation/main/miou']))
trainer.extend(
grasp_fusion.extensions.SigmoidSegmentationVisReport(
iter_test,
model.predictor,
channel_names=channel_names,
shape=(4, 2),
),
trigger=(args.interval_eval, 'epoch'),
)
trainer.extend(
grasp_fusion.extensions.SigmoidSegmentationEvaluator(
iter_test,
model.predictor,
),
trigger=(args.interval_eval, 'epoch'),
)
trainer.extend(extensions.snapshot_object(
target=model.predictor, filename='model_best.npz'),
trigger=chainer.training.triggers.MaxValueTrigger(
key='validation/main/miou',
trigger=(args.interval_eval, 'epoch')))
assert extensions.PlotReport.available()
trainer.extend(extensions.PlotReport(
y_keys=['main/loss'], x_key='iteration',
file_name='loss.png', trigger=(args.interval_print, 'iteration')))
trainer.extend(extensions.PlotReport(
y_keys=['validation/main/miou'], x_key='iteration',
file_name='miou.png', trigger=(args.interval_print, 'iteration')))
return trainer
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'affordance', choices=['suction', 'pinch'], help='affordance'
)
parser.add_argument('-g', '--gpu', type=int, help='gpu id')
parser.add_argument(
'--multi-node', action='store_true', help='use multi node'
)
parser.add_argument(
'--batch-size-per-gpu', type=int, default=1, help='batch size per gpu'
)
parser.add_argument(
'--lr-base', type=float, default=1e-4, help='learning rate base'
)
parser.add_argument(
'--weight-decay', type=float, default=0.0001, help='weight decay'
)
choices = ['rgb', 'depth', 'rgb+depth']
parser.add_argument(
'--modal', choices=choices, default=choices[0], help='input modal'
)
parser.add_argument(
'--noaug', action='store_true', help='not apply data augmentation'
)
parser.add_argument('--max-epoch', type=float, default=48, help='epoch')
parser.add_argument(
'--resolution', type=int, default=30, help='pinch rotation resolution'
)
parser.add_argument('--pretrained-model', help='pretrained model')
args = parser.parse_args()
if args.multi_node:
import chainermn
comm = chainermn.create_communicator('hierarchical')
device = comm.intra_rank
args.n_node = comm.inter_size
args.n_gpu = comm.size
chainer.cuda.get_device_from_id(device).use()
else:
comm = None
args.n_node = 1
args.n_gpu = 1
chainer.cuda.get_device_from_id(args.gpu).use()
device = args.gpu
now = datetime.datetime.now()
args.timestamp = now.isoformat()
if comm is None or comm.rank == 0:
out = osp.join(
here, 'logs', args.affordance, now.strftime('%Y%m%d_%H%M%S.%f')
)
else:
out = None
if comm:
args.out = comm.bcast_obj(out)
else:
args.out = out
del out
args.hostname = socket.gethostname()
args.git_hash = grasp_fusion_lib.utils.git_hash(__file__)
args.batch_size = args.batch_size_per_gpu * args.n_gpu
args.lr = args.lr_base * args.batch_size
args.momentum = 0.99
args.interval_print = 100
args.interval_eval = 2
# -------------------------------------------------------------------------
# data
model, iter_train, iter_test, channel_names = get_model_and_data(
affordance=args.affordance,
batch_size=args.batch_size_per_gpu,
comm=comm,
modal=args.modal,
augmentation=not args.noaug,
resolution=args.resolution,
)
if args.pretrained_model:
chainer.serializers.load_npz(args.pretrained_model, model.predictor)
if device >= 0:
chainer.cuda.get_device(device).use()
model.to_gpu()
# optimizer
optimizer = chainer.optimizers.MomentumSGD(
lr=args.lr, momentum=args.momentum)
if comm:
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(rate=args.weight_decay))
model.predictor.upscore2.disable_update()
model.predictor.upscore8.disable_update()
model.predictor.upscore_pool4.disable_update()
# trainer
trainer = get_trainer(
optimizer,
iter_train,
iter_test,
channel_names,
args=args,
comm=comm,
device=device,
)
trainer.run()
if __name__ == '__main__':
main()
| bsd-3-clause | 7,043,566,435,338,421,000 | 26.938144 | 79 | 0.611931 | false |
NaturalGIS/naturalgis_qgis | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 15 | 4036 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
import warnings
from qgis.core import (QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterNumber,
QgsProcessingParameterFileDestination,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import raster
from qgis.PyQt.QtCore import QCoreApplication
class RasterLayerHistogram(QgisAlgorithm):
INPUT = 'INPUT'
BINS = 'BINS'
OUTPUT = 'OUTPUT'
BAND = 'BAND'
def group(self):
return self.tr('Plots')
def groupId(self):
return 'plots'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
1,
self.INPUT))
self.addParameter(QgsProcessingParameterNumber(self.BINS,
self.tr('number of bins'), minValue=2, defaultValue=10))
self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT, self.tr('Histogram'), self.tr('HTML files (*.html)')))
def name(self):
return 'rasterlayerhistogram'
def displayName(self):
return self.tr('Raster layer histogram')
def processAlgorithm(self, parameters, context, feedback):
try:
# importing plotly throws Python warnings from within the library - filter these out
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ResourceWarning)
warnings.filterwarnings("ignore", category=ImportWarning)
import plotly as plt
import plotly.graph_objs as go
except ImportError:
raise QgsProcessingException(QCoreApplication.translate('RasterLayerHistogram', 'This algorithm requires the Python “plotly” library. Please install this library and try again.'))
layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
band = self.parameterAsInt(parameters, self.BAND, context)
nbins = self.parameterAsInt(parameters, self.BINS, context)
output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
# ALERT: this is potentially blocking if the layer is too big
values = raster.scanraster(layer, feedback, band)
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
data = [go.Histogram(x=valueslist,
nbinsx=nbins)]
plt.offline.plot(data, filename=output, auto_open=False)
return {self.OUTPUT: output}
| gpl-2.0 | 3,791,158,812,569,785,300 | 39.32 | 191 | 0.536706 | false |
teknick/eve-wspace | evewspace/POS/utils.py | 11 | 1757 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from models import CorpPOS
import eveapi
from API import utils as handler
def add_status_info(poses):
"""Accepts a list of corp poses and returns a list of POSes with
status information attached.
A posstatus object has the following attributes:
itemid: the POS item id
pos: POS object processed
status: Status retrieved
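    Example (illustrative only; assumes CorpPOS entries with working API keys):
        for entry in add_status_info(CorpPOS.objects.all()):
            print entry.itemid, entry.status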
"""
class statusentry:
def __init__(self, pos, status):
self.itemid = pos.apiitemid
self.pos = pos
self.status = status
api = eveapi.EVEAPIConnection(cacheHandler=handler)
#Now that we have a corp authenticated API, let's play with some POSes
statuslist = []
for pos in poses:
auth = api.auth(keyID=pos.apikey.keyid, vCode=pos.apikey.vcode)
result = auth.corp.StarbaseDetail(itemID=pos.apiitemid)
status = statusentry(pos, result)
statuslist.append(status)
return statuslist
| gpl-3.0 | -4,808,814,160,905,557,000 | 37.195652 | 74 | 0.698349 | false |
connoranderson/Speechables | mechanize-0.2.5/test-tools/unittest/result.py | 22 | 3847 | """Test result object"""
import traceback
from unittest import util
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun = self.testsRun + 1
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"Called when the given test has been run"
pass
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
return ''.join(traceback.format_exception(exctype, value, tb, length))
return ''.join(traceback.format_exception(exctype, value, tb))
def _is_relevant_tb_level(self, tb):
globs = tb.tb_frame.f_globals
is_relevant = '__name__' in globs and \
globs["__name__"].startswith("unittest")
del globs
return is_relevant
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
| apache-2.0 | 1,196,534,753,680,754,200 | 33.044248 | 82 | 0.616584 | false |
bertucho/epic-movie-quotes-quiz | dialogos/build/scrapy/scrapy/downloadermiddlewares/cookies.py | 23 | 3134 | import os
import six
import logging
from collections import defaultdict
from scrapy.exceptions import NotConfigured
from scrapy.http import Response
from scrapy.http.cookies import CookieJar
logger = logging.getLogger(__name__)
class CookiesMiddleware(object):
"""This middleware enables working with sites that need cookies"""
def __init__(self, debug=False):
self.jars = defaultdict(CookieJar)
self.debug = debug
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('COOKIES_ENABLED'):
raise NotConfigured
return cls(crawler.settings.getbool('COOKIES_DEBUG'))
def process_request(self, request, spider):
if request.meta.get('dont_merge_cookies', False):
return
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
cookies = self._get_request_cookies(jar, request)
for cookie in cookies:
jar.set_cookie_if_ok(cookie, request)
# set Cookie header
request.headers.pop('Cookie', None)
jar.add_cookie_header(request)
self._debug_cookie(request, spider)
def process_response(self, request, response, spider):
if request.meta.get('dont_merge_cookies', False):
return response
# extract cookies from Set-Cookie and drop invalid/expired cookies
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
jar.extract_cookies(response, request)
self._debug_set_cookie(response, spider)
return response
def _debug_cookie(self, request, spider):
if self.debug:
cl = request.headers.getlist('Cookie')
if cl:
msg = "Sending cookies to: %s" % request + os.linesep
msg += os.linesep.join("Cookie: %s" % c for c in cl)
logger.debug(msg, extra={'spider': spider})
def _debug_set_cookie(self, response, spider):
if self.debug:
cl = response.headers.getlist('Set-Cookie')
if cl:
msg = "Received cookies from: %s" % response + os.linesep
msg += os.linesep.join("Set-Cookie: %s" % c for c in cl)
logger.debug(msg, extra={'spider': spider})
def _format_cookie(self, cookie):
# build cookie string
cookie_str = '%s=%s' % (cookie['name'], cookie['value'])
if cookie.get('path', None):
cookie_str += '; Path=%s' % cookie['path']
if cookie.get('domain', None):
cookie_str += '; Domain=%s' % cookie['domain']
return cookie_str
def _get_request_cookies(self, jar, request):
if isinstance(request.cookies, dict):
cookie_list = [{'name': k, 'value': v} for k, v in \
six.iteritems(request.cookies)]
else:
cookie_list = request.cookies
cookies = [self._format_cookie(x) for x in cookie_list]
headers = {'Set-Cookie': cookies}
response = Response(request.url, headers=headers)
return jar.make_cookies(response, request)
| mit | 3,833,325,556,357,626,400 | 33.43956 | 74 | 0.605935 | false |
overtherain/scriptfile | software/googleAppEngine/google/appengine/api/logservice/logservice_stub.py | 2 | 15875 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub implementation for Log Service that utilizes the Datastore.
Logs can be flushed, which will store them in the Datastore, and retrieved for
use by the user. Users can retrieve logs along a number of different query
parameters, including the time the request began, whether or not
application-level logs should be included, and so on.
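For example, a caller could later retrieve these stored logs through the public
logservice API (a minimal sketch; the time range and filters are placeholders
and logservice.fetch() is assumed to be available in the SDK):
  import time
  from google.appengine.api.logservice import logservice
  for req_log in logservice.fetch(end_time=time.time(), include_app_logs=True):
    print req_log.combined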
"""
import os
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api import datastore_errors
from google.appengine.api import logservice
from google.appengine.api import namespace_manager
from google.appengine.api.logservice import log_service_pb
from google.appengine.api.logservice import logservice
from google.appengine.ext import db
from google.appengine.runtime import apiproxy_errors
LOG_NAMESPACE = '_Logs'
_FUTURE_TIME = 2**34
_REQUEST_TIME = 0
_CURRENT_REQUEST_ID_HASH = ''
def _get_request_id():
"""Returns the request ID bound to this request.
Specifically, we see if the request ID hash has changed since the last time we
have examined it. If so, we generate a new ID based on the current time.
Regardless, we return a string whose value decreases w.r.t. time, so that
values stored in the Datastore will be sorted from newest to oldest.
"""
global _CURRENT_REQUEST_ID_HASH
global _REQUEST_TIME
request_id_hash = os.environ.get('REQUEST_ID_HASH', '')
if _CURRENT_REQUEST_ID_HASH != request_id_hash:
_CURRENT_REQUEST_ID_HASH = request_id_hash
_REQUEST_TIME = time.time()
return str(int((_FUTURE_TIME - _REQUEST_TIME) * 1000000))
def _flush_logs_buffer():
"""Empties all logs stored within the globally-held logs buffer."""
logservice.logs_buffer().flush()
def _run_in_namespace(f, *args):
"""Calls 'f' while within the logs namespace.
This is done by methods that need to read or write log data via the Datastore,
as they operate within the LOG_NAMESPACE namespace. Utilizing this namespace
ensures that the user doesn't accidentally receive logs in their query results
or have their Datastore Viewer cluttered by it unless they specifically ask
for it via that namespace.
Args:
f: The function that should be called within the logs namespace.
*args: A list of arguments that f should be called with.
Returns:
The result of f(*args).
"""
namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(LOG_NAMESPACE)
return f(*args)
finally:
namespace_manager.set_namespace(namespace)
class _LogLine(db.Model):
"""Representation of an application-level log line."""
time = db.IntegerProperty()
level = db.IntegerProperty()
message = db.BlobProperty()
class _LogRecord(db.Model):
"""Representation of the logging information for a single web request."""
app_id = db.StringProperty()
version_id = db.StringProperty()
ip = db.StringProperty()
nickname = db.StringProperty()
request_id = db.StringProperty()
start_time = db.IntegerProperty()
end_time = db.IntegerProperty()
latency = db.IntegerProperty()
mcycles = db.IntegerProperty()
method = db.StringProperty()
resource = db.TextProperty()
status = db.IntegerProperty()
response_size = db.IntegerProperty()
http_version = db.StringProperty()
host = db.StringProperty()
user_agent = db.StringProperty()
finished = db.BooleanProperty()
app_logs = db.ListProperty(db.Key)
@classmethod
def get_or_create(cls):
"""Returns the LogRecord for this request, creating it if needed."""
return cls.get_or_insert(str(_get_request_id()))
def fill_in_log(self, request, log, app_logs):
"""Fills in fields in a given RequestLog from a LogReadRequest's fields.
Application-level logs are stored in the Datastore as _LogLines, so this
method also grabs those items, resolves them, and stores them in the given
RequestLog.
Args:
request: A LogReadRequest, containing the filters that the user has
specified to use for their request.
      log: A RequestLog whose fields will be overridden with those from request.
app_logs: The application-level logs associated with the given log.
"""
log.set_app_id(self.app_id)
log.set_version_id(self.version_id)
log.set_ip(self.ip)
log.set_nickname(self.nickname)
log.set_request_id(str(self.key()))
log.set_start_time(self.start_time)
log.set_end_time(self.end_time)
log.set_latency(self.latency)
log.set_mcycles(self.mcycles)
log.set_method(self.method)
log.set_resource(self.resource)
log.set_status(self.status)
log.set_response_size(self.response_size)
log.set_http_version(self.http_version)
if self.host is not None:
log.set_host(self.host)
if self.user_agent is not None:
log.set_user_agent(self.user_agent)
log.set_finished(self.finished)
log.set_url_map_entry('')
time_seconds = (self.end_time or self.start_time) / 10**6
date_string = time.strftime('%d/%b/%Y:%H:%M:%S %z',
time.localtime(time_seconds))
log.set_combined('%s - %s [%s] \"%s %s %s\" %d %d - \"%s\"' %
(self.ip, self.nickname, date_string, self.method,
self.resource, self.http_version, self.status or 0,
self.response_size or 0, self.user_agent))
if request.include_app_logs():
for app_log in app_logs:
log_line = log.add_line()
log_line.set_time(app_log.time)
log_line.set_level(app_log.level)
log_line.set_log_message(app_log.message)
class RequestLogWriter(object):
"""A helper class that writes log lines to the Datastore.
Writes log lines to the Datastore on behalf of the SDK's dev_appserver so that
  they can be queried later via fetch(). Each of three methods writes the
information for a given request:
1) write_request_info: Writes the information found at the beginning of the
request.
2) write: Writes the information found at the end of the request.
3) write_app_logs: Writes application-level logs emitted by the application
(if any).
Properties:
app_id: A string representing the application ID that this request
corresponds to.
version_id: A string representing the version ID that this request
corresponds to.
request_id: An integer that represents a monotonically increasing request
number. The actual value of the request ID doesn't matter - what is
important is that later requests have larger request IDs than earlier
requests.
db_key: A string that will be used as the key for the LogRecord associated
with this request. Requests are sorted in descending order w.r.t. time,
so we just set the key to be computed by a function that decreases w.r.t.
time.
log_msgs: A list that contains the application-level logs generated by
request. Currently this is not implemented - once we get better
integration with the LogService API, this will be remedied.
method: A string corresponding to the HTTP method for this request.
resource: A string corresponding to the relative URL for this request.
http_version: A string corresponding to the HTTP version for this request.
Note that the entire HTTP version is stored here (e.g., "HTTP/1.1" and
not just "1.1").
"""
def __init__(self, persist=False):
"""Constructor.
Args:
persist: If true, log records should be durably persisted.
"""
self.persist = persist
def write_request_info(self, ip, app_id, version_id, nickname, user_agent,
host, start_time=None, end_time=None):
"""Writes a single request log with currently known information.
Args:
ip: The user's IP address.
app_id: A string representing the application ID that this request
corresponds to.
version_id: A string representing the version ID that this request
corresponds to.
nickname: A string representing the user that has made this request (that
is, the user's nickname, e.g., 'foobar' for a user logged in as
'[email protected]').
user_agent: A string representing the agent used to make this request.
host: A string representing the host that received this request.
start_time: If specified, a starting time that should be used instead of
generating one internally (useful for testing).
end_time: If specified, an ending time that should be used instead of
generating one internally (useful for testing).
"""
if not self.persist:
return
namespace_manager.set_namespace(LOG_NAMESPACE)
log = _LogRecord.get_or_create()
log.app_id = app_id
major_version_id = version_id.split('.')[0]
log.version_id = major_version_id
log.ip = ip
log.nickname = nickname
log.user_agent = user_agent
log.host = host
now_time_usecs = self.get_time_now()
log.request_id = str(now_time_usecs)
if start_time is not None:
log.start_time = start_time
else:
log.start_time = now_time_usecs
log.latency = 0
log.mcycles = 0
if end_time:
log.end_time = end_time
log.finished = True
else:
log.finished = False
log.app_logs = []
log.put()
def get_time_now(self):
"""Get the current time in microseconds since epoch."""
return int(time.time() * 1000000)
def write(self, method, resource, status, size, http_version):
"""Writes all request-level information to the Datastore."""
if self.persist:
_run_in_namespace(self._write, method, resource, status, size,
http_version)
def _write(self, method, resource, status, size, http_version):
"""Implements write if called by _run_in_namespace."""
log = _LogRecord.get_or_create()
log.method = method
log.resource = resource
log.status = status
log.response_size = size
log.http_version = http_version
if not log.finished:
log.end_time = self.get_time_now()
log.latency = log.end_time - (log.start_time or 0)
log.finished = True
log.put()
class LogServiceStub(apiproxy_stub.APIProxyStub):
"""Python stub for Log Service service."""
__DEFAULT_READ_COUNT = 20
def __init__(self, persist=False):
"""Constructor."""
super(LogServiceStub, self).__init__('logservice')
self.persist = persist
self.status = None
def _Dynamic_Flush(self, request, unused_response):
"""Writes application-level log messages for a request to the Datastore."""
if self.persist:
group = log_service_pb.UserAppLogGroup(request.logs())
new_app_logs = self.put_log_lines(group.log_line_list())
self.write_app_logs(new_app_logs)
def put_log_lines(self, lines):
"""Creates application-level log lines and stores them in the Datastore.
Args:
lines: A list of LogLines that each correspond to an application-level log
line.
Returns:
A list of Keys corresponding to the newly-stored log lines.
"""
return _run_in_namespace(self._put_log_lines, lines)
def _put_log_lines(self, lines):
"""Implements put_log_lines if called by _run_in_namespace."""
db_models = []
for app_log in lines:
db_log_line = _LogLine(time=app_log.timestamp_usec(),
level=app_log.level(),
message=app_log.message())
db_models.append(db_log_line)
return db.put(db_models)
def write_app_logs(self, new_app_logs):
"""Writes application-level logs for a given request to the Datastore."""
return _run_in_namespace(self._write_app_logs, new_app_logs)
def _write_app_logs(self, new_app_logs):
"""Implements write_app_logs if called by _run_in_namespace."""
log = _LogRecord.get_or_create()
for app_log in new_app_logs:
log.app_logs.append(app_log)
log.put()
def _Dynamic_SetStatus(self, request, unused_response):
"""Record the recently seen status."""
self.status = request.status()
def _Dynamic_Read(self, request, response):
"""Handler for LogRead RPC call.
Our stub implementation stores and retrieves log data via the Datastore,
but because query parameters do not persist in the cursor, we create an
internal cursor that also contains these extra parameters. If it is
present, we parse out these parameters, and conversely, when we create the
cursor, we are sure to include the parameters back in for later retrieval.
Args:
request: A LogReadRequest object.
response: A LogReadResponse object.
"""
_run_in_namespace(self.__Dynamic_Read, request, response)
def __Dynamic_Read(self, request, response):
"""Implements _Dynamic_Read if called by _run_in_namespace."""
response.clear_offset()
if request.version_id_size() != 1:
raise apiproxy_errors.ApplicationError(
log_service_pb.LogServiceError.INVALID_REQUEST)
if (request.request_id_size() and
(request.has_start_time() or request.has_end_time() or
request.has_offset())):
raise apiproxy_errors.ApplicationError(
log_service_pb.LogServiceError.INVALID_REQUEST)
if request.request_id_size():
results = []
try:
results = db.get(request.request_id_list())
except datastore_errors.BadKeyError:
for request_id in request.request_id_list():
try:
results.append(db.get(request_id))
except datastore_errors.BadKeyError:
pass
for result in results:
if result.version_id != request.version_id(0):
continue
log = response.add_log()
app_logs = db.get(result.app_logs)
result.fill_in_log(request, log, app_logs)
return
query = db.Query(_LogRecord)
if request.has_offset():
query.filter('__key__ > ', db.Key(request.offset().request_id()))
if request.has_count():
limit = request.count()
else:
limit = LogServiceStub.__DEFAULT_READ_COUNT
versions = request.version_id_list()
index = 0
for result in query.run(limit=limit):
index += 1
start = result.start_time
if request.has_start_time():
if request.start_time() > start:
continue
if request.has_end_time():
if request.end_time() <= start:
continue
if not request.include_incomplete() and not result.finished:
continue
if result.version_id not in versions:
continue
app_logs = db.get(result.app_logs)
if request.has_minimum_log_level():
for app_log in app_logs:
if app_log.level >= request.minimum_log_level():
break
else:
continue
log = response.add_log()
result.fill_in_log(request, log, app_logs)
log.mutable_offset().set_request_id(str(result.key()))
if index == limit:
response.mutable_offset().set_request_id(str(result.key()))
def _Dynamic_Usage(self, request, response):
"""Dummy method for compatibility."""
raise apiproxy_errors.CapabilityDisabledError('Usage not allowed in tests.')
def get_status(self):
"""Internal method for dev_appserver to read the status."""
return self.status
| mit | 5,645,606,425,958,882,000 | 30.75 | 80 | 0.675339 | false |
agx/git-buildpackage | gbp/git/fastimport.py | 1 | 4709 |
# vim: set fileencoding=utf-8 :
#
# (C) 2011 Guido Günther <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
"""Git fast import class"""
import subprocess
import time
from gbp.errors import GbpError
from gbp.format import format_b
from gbp.paths import to_bin
class FastImport(object):
"""Add data to a git repository using I{git fast-import}"""
_bufsize = 1024
m_regular = 644
m_exec = 755
m_symlink = 120000
def __init__(self, repo):
"""
@param repo: the git repository L{FastImport} acts on
@type repo: L{GitRepository}
"""
self._repo = repo
try:
self._fi = subprocess.Popen(['git', 'fast-import', '--quiet'],
stdin=subprocess.PIPE, cwd=repo.path)
self._out = self._fi.stdin
except OSError as err:
raise GbpError("Error spawning git fast-import: %s" % err)
except ValueError as err:
raise GbpError(
"Invalid argument when spawning git fast-import: %s" % err)
def _do_data(self, fd, size):
self._out.write(format_b(b"data %d\n", size))
while True:
data = fd.read(self._bufsize)
self._out.write(data)
if len(data) != self._bufsize:
break
self._out.write(b"\n")
def _do_file(self, filename, mode, fd, size):
name = b"/".join(to_bin(filename).split(b'/')[1:])
self._out.write(format_b(b"M %d inline %s\n", mode, name))
self._do_data(fd, size)
def add_file(self, filename, fd, size, mode=m_regular):
"""
Add a file
@param filename: the name of the file to add
@type filename: C{str}
@param fd: stream to read data from
@type fd: C{File} like object
@param size: size of the file to add
@type size: C{int}
@param mode: file mode, default is L{FastImport.m_regular}.
@type mode: C{int}
"""
self._do_file(filename, mode, fd, size)
def add_symlink(self, linkname, linktarget):
"""
Add a symlink
@param linkname: the symbolic link's name
        @type linkname: C{str}
@param linktarget: the target the symlink points to
@type linktarget: C{str}
"""
linktarget = to_bin(linktarget)
linkname = to_bin(linkname)
self._out.write(format_b(b"M %d inline %s\n", self.m_symlink, linkname))
self._out.write(format_b(b"data %d\n", len(linktarget)))
self._out.write(format_b(b"%s\n", linktarget))
def start_commit(self, branch, committer, msg):
"""
Start a fast import commit
@param branch: branch to commit on
@type branch: C{str}
@param committer: the committer information
@type committer: L{GitModifier}
@param msg: the commit message
@type msg: C{str}
"""
length = len(msg)
if not committer.date:
committer.date = "%d %s" % (time.time(),
time.strftime("%z"))
if self._repo.has_branch(branch):
from_ = "from refs/heads/%(branch)s^0\n"
else:
from_ = ''
s = """commit refs/heads/%(branch)s
committer %(name)s <%(email)s> %(time)s
data %(length)s
%(msg)s%(from)s""" % {'branch': branch,
'name': committer.name,
'email': committer.email,
'time': committer.date,
'length': length,
'msg': msg,
'from': from_}
self._out.write(s.encode())
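    # A minimal, illustrative call sequence (not part of this class); "repo",
    # "committer" and the file details below are assumptions:
    #
    #   fi = FastImport(repo)
    #   fi.start_commit('master', committer, "import upstream sources")
    #   fi.deleteall()
    #   with open('debian/rules', 'rb') as f:
    #       fi.add_file('debian/rules', f, os.path.getsize('debian/rules'))
    #   fi.close()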
def deleteall(self):
"""
        Issue I{deleteall} to fastimport so we start from an empty tree
"""
self._out.write(b"deleteall\n")
def close(self):
"""
Close fast-import issuing all pending actions
"""
if self._out:
self._out.close()
if self._fi:
self._fi.wait()
def __del__(self):
self.close()
| gpl-2.0 | 8,218,491,110,797,956,000 | 31.694444 | 80 | 0.558411 | false |
kumar303/zamboni | mkt/comm/authorization.py | 19 | 2879 | from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import PermissionDenied
from mkt.comm.models import (CommunicationNote, CommunicationThread,
user_has_perm_note, user_has_perm_thread)
class ThreadPermission(BasePermission):
"""
Permission wrapper for checking if the authenticated user has the
permission to view the thread.
"""
def has_permission(self, request, view):
# Let `has_object_permission` handle the permissions when we retrieve
# an object.
if view.action == 'retrieve':
return True
if not request.user.is_authenticated():
raise PermissionDenied()
return True
def has_object_permission(self, request, view, obj):
"""
Make sure we give correct permissions to read/write the thread.
"""
if not request.user.is_authenticated() or obj.read_permission_public:
return obj.read_permission_public
return user_has_perm_thread(obj, request.user)
class NotePermission(ThreadPermission):
def has_permission(self, request, view):
thread_id = view.kwargs.get('thread_id')
if not thread_id and view.kwargs.get('note_id'):
note = CommunicationNote.objects.get(id=view.kwargs['note_id'])
thread_id = note.thread_id
# We save the thread in the view object so we can use it later.
view.comm_thread = get_object_or_404(
CommunicationThread, id=thread_id)
return ThreadPermission.has_object_permission(
self, request, view, view.comm_thread)
def has_object_permission(self, request, view, obj):
# Has thread obj-level permission AND note obj-level permission.
return user_has_perm_note(obj, request.user)
class AttachmentPermission(NotePermission):
def has_permission(self, request, view):
note = CommunicationNote.objects.get(id=view.kwargs['note_id'])
return NotePermission.has_object_permission(self, request, view, note)
def has_object_permission(self, request, view, obj):
# Has thread obj-level permission AND note obj-level permission.
note = CommunicationNote.objects.get(id=view.kwargs['note_id'])
return NotePermission.has_object_permission(self, request, view, note)
class EmailCreationPermission(object):
"""Permit if client's IP address is allowed."""
def has_permission(self, request, view):
auth_token = request.META.get('HTTP_POSTFIX_AUTH_TOKEN')
if auth_token and auth_token not in settings.POSTFIX_AUTH_TOKEN:
return False
remote_ip = request.META.get('REMOTE_ADDR')
return remote_ip and (
remote_ip in settings.ALLOWED_CLIENTS_EMAIL_API)
| bsd-3-clause | 8,662,823,910,853,650,000 | 35.443038 | 78 | 0.674192 | false |
miguelinux/vbox | src/libs/xpcom18a4/python/components.py | 4 | 9813 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Python XPCOM language bindings.
#
# The Initial Developer of the Original Code is
# ActiveState Tool Corp.
# Portions created by the Initial Developer are Copyright (C) 2000, 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Hammond <[email protected]> (original author)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
# This module provides the JavaScript "components" interface
from . import xpt
import xpcom
import xpcom._xpcom as _xpcom
import xpcom.client
import xpcom.server
StringTypes = [bytes, str]
def _get_good_iid(iid):
if iid is None:
iid = _xpcom.IID_nsISupports
elif type(iid) in StringTypes and len(iid)>0 and iid[0] != "{":
iid = getattr(interfaces, iid)
return iid
# The "manager" object.
manager = xpcom.client.Component(_xpcom.GetComponentManager(), _xpcom.IID_nsIComponentManager)
# The component registrar
registrar = xpcom.client.Component(_xpcom.GetComponentManager(), _xpcom.IID_nsIComponentRegistrar)
# The "interfaceInfoManager" object - JS doesnt have this.
interfaceInfoManager = _xpcom.XPTI_GetInterfaceInfoManager()
# The serviceManager - JS doesn't have this either!
serviceManager = _xpcom.GetServiceManager()
# The "Exception" object
Exception = xpcom.COMException
# Base class for our collections.
# It appears that all objects support "." and "[]" notation.
# eg, "interface.nsISupports" or interfaces["nsISupports"]
class _ComponentCollection:
# Bases are to over-ride 2 methods.
# _get_one(self, name) - to return one object by name
# _build_dict - to return a dictionary which provides access into
def __init__(self):
self._dict_data = None
def keys(self):
if self._dict_data is None:
self._dict_data = self._build_dict()
return list(self._dict_data.keys())
def items(self):
if self._dict_data is None:
self._dict_data = self._build_dict()
return list(self._dict_data.items())
def values(self):
if self._dict_data is None:
self._dict_data = self._build_dict()
return list(self._dict_data.values())
# def has_key(self, key):
# if self._dict_data is None:
# self._dict_data = self._build_dict()
# return self._dict_data.has_key(key)
def __len__(self):
if self._dict_data is None:
self._dict_data = self._build_dict()
return len(self._dict_data)
def __getattr__(self, attr):
if self._dict_data is not None and attr in self._dict_data:
return self._dict_data[attr]
return self._get_one(attr)
def __getitem__(self, item):
if self._dict_data is not None and item in self._dict_data:
return self._dict_data[item]
return self._get_one(item)
_constants_by_iid_map = {}
class _Interface:
# An interface object.
def __init__(self, name, iid):
# Bypass self.__setattr__ when initializing attributes.
d = self.__dict__
d['_iidobj_'] = iid # Allows the C++ framework to treat this as a native IID.
d['name'] = name
def __cmp__(self, other):
this_iid = self._iidobj_
other_iid = getattr(other, "_iidobj_", other)
return cmp(this_iid, other_iid)
def __eq__(self, other):
this_iid = self._iidobj_
other_iid = getattr(other, "_iidobj_", other)
return this_iid == other_iid
def __hash__(self):
return hash(self._iidobj_)
def __str__(self):
return str(self._iidobj_)
def __getitem__(self, item):
raise TypeError("components.interface objects are not subscriptable")
def __setitem__(self, item, value):
raise TypeError("components.interface objects are not subscriptable")
def __setattr__(self, attr, value):
raise AttributeError("Can not set attributes on components.Interface objects")
def __getattr__(self, attr):
# Support constants as attributes.
c = _constants_by_iid_map.get(self._iidobj_)
if c is None:
c = {}
i = xpt.Interface(self._iidobj_)
for c_ob in i.constants:
c[c_ob.name] = c_ob.value
_constants_by_iid_map[self._iidobj_] = c
if attr in c:
return c[attr]
raise AttributeError("'%s' interfaces do not define a constant '%s'" % (self.name, attr))
class _Interfaces(_ComponentCollection):
def _get_one(self, name):
try:
item = interfaceInfoManager.GetInfoForName(name)
except xpcom.COMException as why:
# Present a better exception message, and give a more useful error code.
from . import nsError
raise xpcom.COMException(nsError.NS_ERROR_NO_INTERFACE, "The interface '%s' does not exist" % (name,))
return _Interface(item.GetName(), item.GetIID())
def _build_dict(self):
ret = {}
enum = interfaceInfoManager.EnumerateInterfaces()
while not enum.IsDone():
# Call the Python-specific FetchBlock, to keep the loop in C.
items = enum.FetchBlock(500, _xpcom.IID_nsIInterfaceInfo)
            # This shouldn't be necessary, but appears to be so!
for item in items:
ret[item.GetName()] = _Interface(item.GetName(), item.GetIID())
return ret
# And the actual object people use.
interfaces = _Interfaces()
del _Interfaces # Keep our namespace clean.
#################################################
class _Class:
def __init__(self, contractid):
self.contractid = contractid
def __getattr__(self, attr):
if attr == "clsid":
rc = registrar.contractIDToCID(self.contractid)
# stash it away - it can never change!
self.clsid = rc
return rc
raise AttributeError("%s class has no attribute '%s'" % (self.contractid, attr))
def createInstance(self, iid = None):
import xpcom.client
try:
return xpcom.client.Component(self.contractid, _get_good_iid(iid))
except xpcom.COMException as details:
from . import nsError
# Handle "no such component" in a cleaner way for the user.
if details.errno == nsError.NS_ERROR_FACTORY_NOT_REGISTERED:
raise xpcom.COMException(details.errno, "No such component '%s'" % (self.contractid,))
raise # Any other exception reraise.
def getService(self, iid = None):
return serviceManager.getServiceByContractID(self.contractid, _get_good_iid(iid))
class _Classes(_ComponentCollection):
def __init__(self):
_ComponentCollection.__init__(self)
def _get_one(self, name):
# XXX - Need to check the contractid is valid!
return _Class(name)
def _build_dict(self):
ret = {}
enum = registrar.enumerateContractIDs()
while enum.hasMoreElements():
# Call the Python-specific FetchBlock, to keep the loop in C.
items = enum.fetchBlock(2000, _xpcom.IID_nsISupportsCString)
for item in items:
name = str(item.data)
ret[name] = _Class(name)
return ret
classes = _Classes()
del _Classes
del _ComponentCollection
# The ID function
ID = _xpcom.ID
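# Illustrative only (not part of this module): client code typically mirrors
# the JavaScript "Components" object, e.g.
#
#   from xpcom import components
#   svc = components.classes["@mozilla.org/observer-service;1"] \
#             .getService(components.interfaces.nsIObserverService)
#
# The contract ID above is the same one this module itself uses further below.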
# A helper to cleanup our namespace as xpcom shuts down.
class _ShutdownObserver:
_com_interfaces_ = interfaces.nsIObserver
def observe(self, service, topic, extra):
global manager, registrar, classes, interfaces, interfaceInfoManager, _shutdownObserver, serviceManager, _constants_by_iid_map
manager = registrar = classes = interfaces = interfaceInfoManager = _shutdownObserver = serviceManager = _constants_by_iid_map = None
xpcom.client._shutdown()
xpcom.server._shutdown()
svc = _xpcom.GetServiceManager().getServiceByContractID("@mozilla.org/observer-service;1", interfaces.nsIObserverService)
# Observers will be QI'd for a weak-reference, so we must keep the
# observer alive ourself, and must keep the COM object alive,
# _not_ just the Python instance!!!
_shutdownObserver = xpcom.server.WrapObject(_ShutdownObserver(), interfaces.nsIObserver)
# Say we want a weak ref due to an assertion failing. If this is fixed, we can pass 0,
# and remove the lifetime hacks above! See http://bugzilla.mozilla.org/show_bug.cgi?id=99163
svc.addObserver(_shutdownObserver, "xpcom-shutdown", 1)
del svc, _ShutdownObserver
| gpl-2.0 | -1,621,872,366,042,716,200 | 39.217213 | 141 | 0.654438 | false |
lsaffre/lino-faggio | lino_voga/lib/courses/fixtures/demo.py | 2 | 3254 | # -*- coding: UTF-8 -*-
# Copyright 2012-2016 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from builtins import range
import datetime
from lino.api import dd, rt
from lino.utils import mti, Cycler
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# courses = dd.resolve_app('courses')
# cal = dd.resolve_app('cal')
# users = dd.resolve_app('users')
def objects():
Person = rt.models.contacts.Person
PupilType = rt.models.courses.PupilType
TeacherType = rt.models.courses.TeacherType
Pupil = rt.models.courses.Pupil
Teacher = rt.models.courses.Teacher
SalesRule = rt.models.invoicing.SalesRule
yield PupilType(ref="M", **dd.str2kw('name', _("Member")))
yield PupilType(ref="H", **dd.str2kw('name', _("Helper")))
yield PupilType(ref="N", **dd.str2kw('name', _("Non-member")))
#~ yield courses.Room(name="A")
#~ yield cal.Place(name="A")
#~ yield cal.Place(name="B")
#~ yield cal.Place(name="C")
#~ yield cal.Place(name="D")
#~ yield cal.Place(name="E")
#~ yield cal.Place(name="F")
PTYPES = Cycler(PupilType.objects.all())
TTYPES = Cycler(TeacherType.objects.all())
n = 0
for p in Person.objects.all():
if n % 2 == 0:
yield mti.insert_child(p, Pupil, pupil_type=PTYPES.pop())
if n % 9 == 0:
yield mti.insert_child(p, Teacher, teacher_type=TTYPES.pop())
n += 1
invoice_recipient = None
for n, p in enumerate(Pupil.objects.all()):
if n % 10 == 0:
yield SalesRule(
partner=p, invoice_recipient=invoice_recipient)
# p.invoice_recipient = invoice_recipient
# yield p
else:
invoice_recipient = p
if False:
#~ PS = Cycler(courses.PresenceStatus.objects.all())
CONTENTS = Cycler(rt.models.courses.Line.objects.all())
USERS = Cycler(rt.models.users.User.objects.all())
PLACES = Cycler(rt.models.cal.Room.objects.all())
TEACHERS = Cycler(Teacher.objects.all())
SLOTS = Cycler(rt.models.courses.Slot.objects.all())
#~ SLOTS = Cycler(1,2,3,4)
PUPILS = Cycler(Pupil.objects.all())
#~ Event = settings.SITE.models.cal.Event
#~ from lino_xl.lib.cal.utils import DurationUnit
year = settings.SITE.demo_date().year
if settings.SITE.demo_date().month < 7:
year -= 1
for i in range(10):
c = courses.Course(
user=USERS.pop(),
teacher=TEACHERS.pop(),
line=CONTENTS.pop(), room=PLACES.pop(),
start_date=datetime.date(year, 9, 1 + i),
end_date=datetime.date(year + 1, 6, 30),
every=1,
every_unit=cal.DurationUnits.weeks,
slot=SLOTS.pop(),
)
yield c
for j in range(5):
yield courses.Enrolment(pupil=PUPILS.pop(), course=c)
c.save() # fill presences
#~ for j in range(5):
#~ yield courses.Event(start_date=settings.SITE.demo_date(j*7),course=c)
#~ yield courses.Presence()
| bsd-2-clause | -7,735,009,765,770,809,000 | 32.895833 | 88 | 0.579902 | false |
RaumZeit/gdesklets-core | shell2/control/local/__init__.py | 2 | 6608 | ''' Handles the local desklets and controls '''
import dircache
import os
from unpack import unpack # untar the files
import file_operations # remove, etc.
import core_interface_035 as core_interface
def get_desklets(directory):
''' Gets the local desklets from the given directory '''
list = {}
to_be_parsed = []
dircache.reset()
try:
display_dir = dircache.listdir( directory )
except OSError:
# no desklet dir available
return list
for dir in display_dir:
list.update( get_desklet_information(directory, dir) )
return list
def get_desklet_information(directory, dir):
list = {}
to_be_parsed = [] # list of display files to parse
desklet_contents = dircache.listdir( os.path.join(directory, dir) )
# find all display files under the subdirectory
for file in desklet_contents:
if file[-8:] == ".display":
to_be_parsed.append(file)
# If there are multiple displays inside a directory, save
# them all inside a "displays" dict under
# the dirname
if len(to_be_parsed) > 1:
list[dir] = {}
list[dir]["displays"] = {}
max_version = 0
authors = []
for display in to_be_parsed:
full_path = directory+"/"+dir+"/"+display
meta = parse_display_meta( full_path, dir )
# find the largest version of all the displays and show that as the
# version of the package
max_version = max(max_version, meta["version"])
list[dir]["displays"][ meta["name"] ] = meta
            # remember each author only once
            if meta['author'] not in authors:
                authors.append(meta['author'])
list[dir]["state"] = "installed"
list[dir]["version"] = max_version
list[dir]["name"] = dir
list[dir]["preview"] = "" # make blank so that the available preview gets used
list[dir]["description"] = ""
list[dir]["category"] = "Multi-display"
list[dir]["directory"] = dir
list[dir]["author"] = authors
elif len(to_be_parsed) == 1:
full_path = directory+"/"+dir+"/"+to_be_parsed[0]
meta = parse_display_meta( full_path )
meta["directory"] = dir
meta["displays"] = {meta["name"]: meta} # there is only one display in this package
list[ meta["name"] ] = meta
return list
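# For orientation (illustrative, not generated by this code): the mapping
# returned above holds one entry per package, roughly of the form
#
#   {"SomeDesklet": {"name": "SomeDesklet", "author": "...", "version": "1.0",
#                    "category": "...", "description": "...", "preview": "...",
#                    "state": "installed", "local_path": "...", "package": "",
#                    "directory": "SomeDesklet", "displays": {...}}}
#
# where "SomeDesklet" is a made-up package name.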
def get_controls(directory):
''' Gets the local controls from the given directory '''
list = {}
dircache.reset()
try:
control_dir = dircache.listdir( directory )
except OSError:
# no control dir available
return list
# pretty useless to have a dict where the name is under the name
# perhaps in the future we'll have something like properties in
# the dict...
for dir in control_dir:
list[dir] = {"name": dir, "directory": os.path.join(directory, dir)}
return list
def parse_display_meta( target_file, package = "" ):
''' Opens the display file and parses it for meta information. Does
not start the display. The "package" parameter gets saved inside
multi-display-packages, so that the displays know where they belong '''
f = open( target_file )
read_meta_tag = False
# first read the entire meta tag into a string
while not read_meta_tag:
line = f.readline()
start_index = line.find( "<meta" )
if line != "" and start_index != -1:
found_end_tag = False
meta_tag = line[start_index:]
while not found_end_tag:
line = f.readline()
end_index = line.find( ">" )
if line != "" and end_index != -1:
meta_tag += line[:end_index+1]
found_end_tag = True
read_meta_tag = True
else:
meta_tag += line
elif line == "": # readline returned "" -> we reached the end of the file
# print "No metatag found from ", target_file
meta = {"name": os.path.basename(target_file), "author": "Unknown", "version" : "Unknown",
"category": None, "description": "",
"preview": None, "state": None,
"local_path": target_file, "package": package }
return meta
# then parse the attributes
try:
name_start = meta_tag.index( 'name=\"' ) + 6
name_end = meta_tag.index( '"', name_start )
name = unicode(meta_tag[ name_start:name_end ])
except ValueError:
name = "Desklet name unknown"
try:
author_start = meta_tag.index( 'author=\"' ) + 8
author_end = meta_tag.index( '"', author_start )
author = unicode(meta_tag[ author_start:author_end ])
except ValueError:
author = "Unknown author"
try:
version_start = meta_tag.index( 'version=\"' ) + 9
version_end = meta_tag.index( '"', version_start )
version = unicode(meta_tag[ version_start:version_end ])
except ValueError:
version = ""
try:
category_start = meta_tag.index( 'category=\"' ) + 10
category_end = meta_tag.index( '"', category_start )
category = unicode(meta_tag[ category_start:category_end ])
except ValueError:
category = "Uncategorized"
try:
description_start = meta_tag.index( 'description=\"' ) + 13
description_end = meta_tag.index( '"', description_start )
description = unicode(meta_tag[ description_start:description_end ])
except ValueError:
description = "No description"
try:
preview_start = meta_tag.index( 'preview=\"' ) + 9
preview_end = meta_tag.index( '"', preview_start )
preview = unicode(meta_tag[ preview_start:preview_end ])
display_path = os.path.dirname(target_file)
preview = os.path.join(display_path, preview)
# if there was no preview image available, then try to find one
if not os.path.exists(preview):
pass
# print " ! no preview image for", name, " @", preview
except ValueError:
preview = ""
state = "installed"
meta = {"name": name, "author": author, "version" :version,
"category": category, "description": description,
"preview": preview, "state": state,
"local_path": target_file, "package": package }
return meta
| gpl-2.0 | -4,502,939,055,051,956,700 | 33.596859 | 102 | 0.568099 | false |
grahamgilbert/docker-sal | settings.py | 1 | 7196 | # Django settings for sal project.
from settings_import import ADMINS, TIME_ZONE, LANGUAGE_CODE, ALLOWED_HOSTS, DISPLAY_NAME, PLUGIN_ORDER
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
APPEND_SLASH=False
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
AUTH_PROFILE_MODULE = "sal.UserProfile"
MANAGERS = ADMINS
# Only show these plugins on the front page - some things only the admins should see.
LIMIT_PLUGIN_TO_FRONT_PAGE = []
# Hide these plugins from the front page
HIDE_PLUGIN_FROM_FRONT_PAGE = []
# Hide these plugins from the specified business units
HIDE_PLUGIN_FROM_BUSINESS_UNIT = {
#'Encryption':['1']
}
# Hide these plugins from the specified machine groups
HIDE_PLUGIN_FROM_MACHINE_GROUP = {
# 'DiskSpace':['1']
}
PLUGIN_DIR = os.path.join(PROJECT_DIR, 'plugins')
# If you want to have a default machine group, define this to the key of
# that group.
#DEFAULT_MACHINE_GROUP_KEY = ''
# Facts which will have historical data kept in addition to the most
# recent instance of that fact.
HISTORICAL_FACTS = [
# 'memoryfree_mb',
]
# How long to keep historical facts around before pruning them.
HISTORICAL_DAYS = 180
EXCLUDED_FACTS = {
'sshrsakey',
'sshfp_rsa',
'sshfp_dsa',
'sshdsakey',
}
EXCLUDED_CONDITIONS = {
# 'some_condition',
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_DIR, 'db/sal.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# PG Database
if 'DB_PORT_5432_TCP_ADDR' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASS'],
'HOST': os.environ['DB_PORT_5432_TCP_ADDR'],
'PORT': os.environ['DB_PORT_5432_TCP_PORT'],
}
}
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'site_static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ppf%ls0f)mzkf#2dl-nbf^8f&=84py=y^u8^z-f559*d36y_@v'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"sal.context_processors.display_name",
"sal.context_processors.config_installed",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
LOGIN_URL='/login/'
LOGIN_REDIRECT_URL='/'
ROOT_URLCONF = 'sal.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sal.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
os.path.join(PROJECT_DIR, 'server', 'plugins'),
PLUGIN_DIR,
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'sal',
'server',
'south',
'bootstrap3',
)
BOOTSTRAP3 = {
'set_placeholder': False,
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| apache-2.0 | -1,470,711,382,611,792,100 | 31.269058 | 121 | 0.672318 | false |
mzdaniel/oh-mainline | vendor/packages/twill/twill/other_packages/_mechanize_dist/_mozillacookiejar.py | 20 | 6161 | """Mozilla / Netscape cookie loading / saving.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re, time, logging
from _clientcookie import reraise_unmasked_exceptions, FileCookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("ClientCookie").debug
class MozillaCookieJar(FileCookieJar):
"""
    WARNING: you may want to back up your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%s does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith("#") or
line.strip().startswith("$") or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except:
reraise_unmasked_exceptions((IOError,))
raise LoadError("invalid Netscape format file %s: %s" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
debug("Saving Netscape cookies.txt file")
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
debug(" Not saving %s: marked for discard", cookie.name)
continue
if not ignore_expires and cookie.is_expired(now):
debug(" Not saving %s: expired", cookie.name)
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
| agpl-3.0 | -3,639,022,201,207,968,300 | 37.748428 | 80 | 0.540497 | false |
edxnercel/edx-platform | lms/djangoapps/mobile_api/users/serializers.py | 16 | 3387 | """
Serializer for user API
"""
from rest_framework import serializers
from rest_framework.reverse import reverse
from courseware.courses import course_image_url
from student.models import CourseEnrollment, User
from certificates.models import certificate_status_for_student, CertificateStatuses
class CourseOverviewField(serializers.RelatedField):
"""Custom field to wrap a CourseDescriptor object. Read-only."""
def to_native(self, course_overview):
course_id = unicode(course_overview.id)
request = self.context.get('request', None)
if request:
video_outline_url = reverse(
'video-summary-list',
kwargs={'course_id': course_id},
request=request
)
course_updates_url = reverse(
'course-updates-list',
kwargs={'course_id': course_id},
request=request
)
course_handouts_url = reverse(
'course-handouts-list',
kwargs={'course_id': course_id},
request=request
)
else:
video_outline_url = None
course_updates_url = None
course_handouts_url = None
return {
"id": course_id,
"name": course_overview.display_name,
"number": course_overview.display_number_with_default,
"org": course_overview.display_org_with_default,
"start": course_overview.start,
"end": course_overview.end,
"course_image": course_overview.course_image_url,
"social_urls": {
"facebook": course_overview.facebook_url,
},
"latest_updates": {
"video": None
},
"video_outline": video_outline_url,
"course_updates": course_updates_url,
"course_handouts": course_handouts_url,
"subscription_id": course_overview.clean_id(padding_char='_'),
}
class CourseEnrollmentSerializer(serializers.ModelSerializer):
"""
Serializes CourseEnrollment models
"""
course = CourseOverviewField(source="course_overview")
certificate = serializers.SerializerMethodField('get_certificate')
def get_certificate(self, model):
"""Returns the information about the user's certificate in the course."""
certificate_info = certificate_status_for_student(model.user, model.course_id)
if certificate_info['status'] == CertificateStatuses.downloadable:
return {
"url": certificate_info['download_url'],
}
else:
return {}
class Meta(object): # pylint: disable=missing-docstring
model = CourseEnrollment
fields = ('created', 'mode', 'is_active', 'course', 'certificate')
lookup_field = 'username'
class UserSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializes User models
"""
name = serializers.Field(source='profile.name')
course_enrollments = serializers.HyperlinkedIdentityField(
view_name='courseenrollment-detail',
lookup_field='username'
)
class Meta(object): # pylint: disable=missing-docstring
model = User
fields = ('id', 'username', 'email', 'name', 'course_enrollments')
lookup_field = 'username'
| agpl-3.0 | 8,744,006,128,680,315,000 | 34.28125 | 86 | 0.600827 | false |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/ruutu.py | 29 | 4297 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
determine_ext,
int_or_none,
xpath_attr,
xpath_text,
)
class RuutuIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ruutu\.fi/video/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.ruutu.fi/video/2058907',
'md5': 'ab2093f39be1ca8581963451b3c0234f',
'info_dict': {
'id': '2058907',
'ext': 'mp4',
'title': 'Oletko aina halunnut tietää mitä tapahtuu vain hetki ennen lähetystä? - Nyt se selvisi!',
'description': 'md5:cfc6ccf0e57a814360df464a91ff67d6',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 114,
'age_limit': 0,
},
},
{
'url': 'http://www.ruutu.fi/video/2057306',
'md5': '065a10ae4d5b8cfd9d0c3d332465e3d9',
'info_dict': {
'id': '2057306',
'ext': 'mp4',
'title': 'Superpesis: katso koko kausi Ruudussa',
'description': 'md5:da2736052fef3b2bd5e0005e63c25eac',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 40,
'age_limit': 0,
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
video_xml = self._download_xml(
'http://gatling.ruutu.fi/media-xml-cache?id=%s' % video_id, video_id)
formats = []
processed_urls = []
def extract_formats(node):
for child in node:
if child.tag.endswith('Files'):
extract_formats(child)
elif child.tag.endswith('File'):
video_url = child.text
if (not video_url or video_url in processed_urls or
any(p in video_url for p in ('NOT_USED', 'NOT-USED'))):
return
processed_urls.append(video_url)
ext = determine_ext(video_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
video_url, video_id, f4m_id='hds', fatal=False))
else:
proto = compat_urllib_parse_urlparse(video_url).scheme
if not child.tag.startswith('HTTP') and proto != 'rtmp':
continue
preference = -1 if proto == 'rtmp' else 1
label = child.get('label')
tbr = int_or_none(child.get('bitrate'))
format_id = '%s-%s' % (proto, label if label else tbr) if label or tbr else proto
if not self._is_valid_url(video_url, video_id, format_id):
continue
width, height = [int_or_none(x) for x in child.get('resolution', 'x').split('x')[:2]]
formats.append({
'format_id': format_id,
'url': video_url,
'width': width,
'height': height,
'tbr': tbr,
'preference': preference,
})
extract_formats(video_xml.find('./Clip'))
self._sort_formats(formats)
return {
'id': video_id,
'title': xpath_attr(video_xml, './/Behavior/Program', 'program_name', 'title', fatal=True),
'description': xpath_attr(video_xml, './/Behavior/Program', 'description', 'description'),
'thumbnail': xpath_attr(video_xml, './/Behavior/Startpicture', 'href', 'thumbnail'),
'duration': int_or_none(xpath_text(video_xml, './/Runtime', 'duration')),
'age_limit': int_or_none(xpath_text(video_xml, './/AgeLimit', 'age limit')),
'formats': formats,
}
| gpl-2.0 | -5,128,898,413,219,188,000 | 41.078431 | 115 | 0.466449 | false |
BiaDarkia/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 34 | 2170 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
    contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                           linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired,
edgecolors=(0, 0, 0))
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause | -4,375,816,046,593,719,300 | 36.413793 | 78 | 0.602304 | false |
redhat-openstack/neutron | neutron/plugins/ofagent/agent/flows.py | 18 | 15211 | # Copyright (C) 2014 VA Linux Systems Japan K.K.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenFlow1.3 flow table for OFAgent
* requirements
** plain OpenFlow 1.3. no vendor extensions.
* legends
xxx: network id (agent internal use)
yyy: segment id (vlan id, gre key, ...)
a,b,c: tunnel port (tun_ofports, map[net_id].tun_ofports)
i,j,k: vm port (map[net_id].vif_ports[vif_id].ofport)
x,y,z: physical port (int_ofports)
N: tunnel type (0 for TYPE_GRE, 1 for TYPE_xxx, ...)
iii: unknown ip address
uuu: unicast l2 address
* tables (in order)
CHECK_IN_PORT
TUNNEL_IN+N
PHYS_IN
LOCAL_IN
ARP_PASSTHROUGH
ARP_RESPONDER
TUNNEL_OUT
LOCAL_OUT
PHYS_OUT
TUNNEL_FLOOD+N
PHYS_FLOOD
LOCAL_FLOOD
* CHECK_IN_PORT
for each vm ports:
// check_in_port_add_local_port, check_in_port_delete_port
in_port=i, write_metadata(LOCAL|xxx),goto(LOCAL_IN)
TYPE_GRE
for each tunnel ports:
// check_in_port_add_tunnel_port, check_in_port_delete_port
in_port=a, goto(TUNNEL_IN+N)
TYPE_VLAN
for each networks ports:
// provision_tenant_physnet, reclaim_tenant_physnet
in_port=x,vlan_vid=present|yyy, write_metadata(xxx),goto(PHYS_IN)
TYPE_FLAT
// provision_tenant_physnet, reclaim_tenant_physnet
in_port=x, write_metadata(xxx),goto(PHYS_IN)
default drop
* TUNNEL_IN+N (per tunnel types) tunnel -> network
for each networks:
// provision_tenant_tunnel, reclaim_tenant_tunnel
tun_id=yyy, write_metadata(xxx),goto(TUNNEL_OUT)
default drop
* PHYS_IN
default goto(TUNNEL_OUT)
* LOCAL_IN
default goto(next_table)
* ARP_PASSTHROUGH
for each unknown tpa:
// arp_passthrough
arp,arp_op=request,metadata=xxx,tpa=iii, idle_timeout=5, goto(TUNNEL_OUT)
default goto(next_table)
* ARP_RESPONDER
arp,arp_op=request, output:controller
default goto(next_table)
* TUNNEL_OUT
TYPE_GRE
// !FLOODING_ENTRY
// install_tunnel_output, delete_tunnel_output
metadata=LOCAL|xxx,eth_dst=uuu set_tunnel(yyy),output:a
default goto(next table)
* LOCAL_OUT
for each known destinations:
// local_out_add_port, local_out_delete_port
metadata=xxx,eth_dst=uuu output:i
default goto(next table)
* PHYS_OUT
NOTE(yamamoto): currently this table is always empty.
default goto(next table)
* TUNNEL_FLOOD+N. (per tunnel types)
network -> tunnel/vlan
output to tunnel/physical ports
"next table" might be LOCAL_OUT
TYPE_GRE
for each networks:
// FLOODING_ENTRY
// install_tunnel_output, delete_tunnel_output
metadata=LOCAL|xxx, set_tunnel(yyy),output:a,b,c,goto(next table)
default goto(next table)
* PHYS_FLOOD
TYPE_VLAN
for each networks:
// provision_tenant_physnet, reclaim_tenant_physnet
metadata=LOCAL|xxx, push_vlan:0x8100,set_field:present|yyy->vlan_vid,
output:x,pop_vlan,goto(next table)
TYPE_FLAT
for each networks:
// provision_tenant_physnet, reclaim_tenant_physnet
metadata=LOCAL|xxx, output:x,goto(next table)
default goto(next table)
* LOCAL_FLOOD
for each networks:
// local_flood_update, local_flood_delete
metadata=xxx, output:i,j,k
or
metadata=xxx,eth_dst=broadcast, output:i,j,k
default drop
* references
** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
*** we use metadata instead of "internal" VLANs
*** we don't want to use NX learn action
"""
from ryu.lib.packet import arp
from ryu.ofproto import ether
from neutron.plugins.common import constants as p_const
import neutron.plugins.ofagent.agent.metadata as meta
from neutron.plugins.ofagent.agent import ofswitch
from neutron.plugins.ofagent.agent import tables
class OFAgentIntegrationBridge(ofswitch.OpenFlowSwitch):
"""ofagent br-int specific logic."""
def setup_default_table(self):
self.delete_flows()
self.install_default_drop(tables.CHECK_IN_PORT)
for t in tables.TUNNEL_IN.values():
self.install_default_drop(t)
self.install_default_goto(tables.PHYS_IN, tables.TUNNEL_OUT)
self.install_default_goto_next(tables.LOCAL_IN)
self.install_default_goto_next(tables.ARP_PASSTHROUGH)
self.install_arp_responder(tables.ARP_RESPONDER)
self.install_default_goto_next(tables.TUNNEL_OUT)
self.install_default_goto_next(tables.LOCAL_OUT)
self.install_default_goto_next(tables.PHYS_OUT)
for t in tables.TUNNEL_FLOOD.values():
self.install_default_goto_next(t)
self.install_default_goto_next(tables.PHYS_FLOOD)
self.install_default_drop(tables.LOCAL_FLOOD)
def install_arp_responder(self, table_id):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(eth_type=ether.ETH_TYPE_ARP,
arp_op=arp.ARP_REQUEST)
actions = [ofpp.OFPActionOutput(ofp.OFPP_CONTROLLER)]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
msg = ofpp.OFPFlowMod(dp,
table_id=table_id,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
self.install_default_goto_next(table_id)
def install_tunnel_output(self, table_id,
network, segmentation_id,
ports, goto_next, **additional_matches):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network, meta.LOCAL),
**additional_matches)
actions = [ofpp.OFPActionSetField(tunnel_id=segmentation_id)]
actions += [ofpp.OFPActionOutput(port=p) for p in ports]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
if goto_next:
instructions += [
ofpp.OFPInstructionGotoTable(table_id=table_id + 1),
]
msg = ofpp.OFPFlowMod(dp,
table_id=table_id,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def delete_tunnel_output(self, table_id,
network, **additional_matches):
(dp, _ofp, ofpp) = self._get_dp()
self.delete_flows(table_id=table_id,
metadata=meta.mk_metadata(network, meta.LOCAL),
**additional_matches)
def provision_tenant_tunnel(self, network_type, network, segmentation_id):
(dp, _ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(tunnel_id=segmentation_id)
metadata = meta.mk_metadata(network)
instructions = [
ofpp.OFPInstructionWriteMetadata(metadata=metadata[0],
metadata_mask=metadata[1]),
ofpp.OFPInstructionGotoTable(table_id=tables.TUNNEL_OUT),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.TUNNEL_IN[network_type],
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def reclaim_tenant_tunnel(self, network_type, network, segmentation_id):
table_id = tables.TUNNEL_IN[network_type]
self.delete_flows(table_id=table_id, tunnel_id=segmentation_id)
def provision_tenant_physnet(self, network_type, network,
segmentation_id, phys_port):
"""for vlan and flat."""
assert(network_type in [p_const.TYPE_VLAN, p_const.TYPE_FLAT])
(dp, ofp, ofpp) = self._get_dp()
# inbound
metadata = meta.mk_metadata(network)
instructions = [
ofpp.OFPInstructionWriteMetadata(metadata=metadata[0],
metadata_mask=metadata[1])
]
if network_type == p_const.TYPE_VLAN:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
match = ofpp.OFPMatch(in_port=phys_port, vlan_vid=vlan_vid)
actions = [ofpp.OFPActionPopVlan()]
instructions += [ofpp.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
else:
match = ofpp.OFPMatch(in_port=phys_port)
instructions += [ofpp.OFPInstructionGotoTable(table_id=tables.PHYS_IN)]
msg = ofpp.OFPFlowMod(dp,
priority=1,
table_id=tables.CHECK_IN_PORT,
match=match,
instructions=instructions)
self._send_msg(msg)
# outbound
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network, meta.LOCAL))
if network_type == p_const.TYPE_VLAN:
actions = [
ofpp.OFPActionPushVlan(),
ofpp.OFPActionSetField(vlan_vid=vlan_vid),
]
else:
actions = []
actions += [ofpp.OFPActionOutput(port=phys_port)]
if network_type == p_const.TYPE_VLAN:
actions += [ofpp.OFPActionPopVlan()]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
ofpp.OFPInstructionGotoTable(table_id=tables.PHYS_FLOOD + 1),
]
msg = ofpp.OFPFlowMod(dp,
priority=1,
table_id=tables.PHYS_FLOOD,
match=match,
instructions=instructions)
self._send_msg(msg)
def reclaim_tenant_physnet(self, network_type, network,
segmentation_id, phys_port):
(_dp, ofp, _ofpp) = self._get_dp()
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
if network_type == p_const.TYPE_VLAN:
self.delete_flows(table_id=tables.CHECK_IN_PORT,
in_port=phys_port, vlan_vid=vlan_vid)
else:
self.delete_flows(table_id=tables.CHECK_IN_PORT,
in_port=phys_port)
self.delete_flows(table_id=tables.PHYS_FLOOD,
metadata=meta.mk_metadata(network))
def check_in_port_add_tunnel_port(self, network_type, port):
(dp, _ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(in_port=port)
instructions = [
ofpp.OFPInstructionGotoTable(
table_id=tables.TUNNEL_IN[network_type])
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.CHECK_IN_PORT,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def check_in_port_add_local_port(self, network, port):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(in_port=port)
metadata = meta.mk_metadata(network, meta.LOCAL)
instructions = [
ofpp.OFPInstructionWriteMetadata(metadata=metadata[0],
metadata_mask=metadata[1]),
ofpp.OFPInstructionGotoTable(table_id=tables.LOCAL_IN),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.CHECK_IN_PORT,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def check_in_port_delete_port(self, port):
self.delete_flows(table_id=tables.CHECK_IN_PORT, in_port=port)
def local_flood_update(self, network, ports, flood_unicast):
(dp, ofp, ofpp) = self._get_dp()
match_all = ofpp.OFPMatch(metadata=meta.mk_metadata(network))
match_multicast = ofpp.OFPMatch(metadata=meta.mk_metadata(network),
eth_dst=('01:00:00:00:00:00',
'01:00:00:00:00:00'))
if flood_unicast:
match_add = match_all
match_del = match_multicast
else:
match_add = match_multicast
match_del = match_all
actions = [ofpp.OFPActionOutput(port=p) for p in ports]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.LOCAL_FLOOD,
priority=1,
match=match_add,
instructions=instructions)
self._send_msg(msg)
self.delete_flows(table_id=tables.LOCAL_FLOOD, strict=True,
priority=1, match=match_del)
def local_flood_delete(self, network):
self.delete_flows(table_id=tables.LOCAL_FLOOD,
metadata=meta.mk_metadata(network))
def local_out_add_port(self, network, port, mac):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network), eth_dst=mac)
actions = [ofpp.OFPActionOutput(port=port)]
instructions = [
ofpp.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions),
]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.LOCAL_OUT,
priority=1,
match=match,
instructions=instructions)
self._send_msg(msg)
def local_out_delete_port(self, network, mac):
self.delete_flows(table_id=tables.LOCAL_OUT,
metadata=meta.mk_metadata(network), eth_dst=mac)
def arp_passthrough(self, network, tpa):
(dp, ofp, ofpp) = self._get_dp()
match = ofpp.OFPMatch(metadata=meta.mk_metadata(network),
eth_type=ether.ETH_TYPE_ARP,
arp_op=arp.ARP_REQUEST,
arp_tpa=tpa)
instructions = [
ofpp.OFPInstructionGotoTable(table_id=tables.TUNNEL_OUT)]
msg = ofpp.OFPFlowMod(dp,
table_id=tables.ARP_PASSTHROUGH,
priority=1,
idle_timeout=5,
match=match,
instructions=instructions)
self._send_msg(msg)
| apache-2.0 | 2,142,964,280,140,703,700 | 36.373464 | 79 | 0.574321 | false |
smurn/augast | doc/conf.py | 2 | 8530 | # -*- coding: utf-8 -*-
#
# Lenatu documentation build configuration file, created by
# sphinx-quickstart on Thu May 23 13:41:54 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Lenatu'
copyright = u'2014, Stefan C. Mueller'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pythondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python.tex', u'Lenatu Documentation',
u'Stefan C. Mueller', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python', u'Lenatu Documentation',
[u'Stefan C. Mueller'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python', u'Lenatu Documentation',
u'Stefan C. Mueller', 'python', '',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip) | mit | -5,939,633,539,836,018,000 | 30.713755 | 80 | 0.693435 | false |
ndardenne/pymatgen | pymatgen/io/xr.py | 2 | 6270 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides input and output mechanisms
for the xr file format, which is a modified CSSR
file format used, for example, in GULP.
In particular, the module makes it easy
to remove shell positions from relaxations
that employed core-shell models.
"""
__author__ = "Nils Edvin Richard Zimmermann"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Nils Edvin Richard Zimmermann"
__email__ = "[email protected]"
__date__ = "June 23, 2016"
import re
from six.moves import map
import numpy as np
from monty.io import zopen
from math import fabs
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
class Xr(object):
"""
Basic object for working with xr files.
Args:
structure (Structure/IStructure): Structure object to create the
Xr object.
"""
def __init__(self, structure):
if not structure.is_ordered:
raise ValueError("Xr file can only be constructed from ordered "
"structure")
self.structure = structure
def __str__(self):
output = ["pymatgen {:.4f} {:.4f} {:.4f}"
.format(*self.structure.lattice.abc),
"{:.3f} {:.3f} {:.3f}"
.format(*self.structure.lattice.angles),
"{} 0".format(len(self.structure)),
"0 {}".format(self.structure.formula)]
# There are actually 10 more fields per site
# in a typical xr file from GULP, for example.
for i, site in enumerate(self.structure.sites):
output.append("{} {} {:.4f} {:.4f} {:.4f}"
.format(i + 1, site.specie, site.x, site.y, site.z))
mat = self.structure.lattice.matrix
for i in range(2):
for j in range(3):
output.append("{:.4f} {:.4f} {:.4f}".format(
mat[j][0], mat[j][1], mat[j][2]))
return "\n".join(output)
def write_file(self, filename):
"""
Write out an xr file.
Args:
filename (str): name of the file to write to.
"""
with zopen(filename, 'wt') as f:
f.write(str(self) + "\n")
@staticmethod
def from_string(string, use_cores=True, thresh=1.e-4):
"""
Creates an Xr object from a string representation.
Args:
string (str): string representation of an Xr object.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
string representation.
"""
lines = string.split("\n")
toks = lines[0].split()
lengths = [float(toks[i]) for i in range(1, len(toks))]
toks = lines[1].split()
angles = [float(i) for i in toks[0:3]]
toks = lines[2].split()
nsites = int(toks[0])
mat = np.zeros((3,3), dtype=float)
for i in range(3):
toks = lines[4+nsites+i].split()
toks2 = lines[4+nsites+i+3].split()
for j, item in enumerate(toks):
if item != toks2[j]:
raise RuntimeError("expected both matrices"
" to be the same in xr file")
mat[i] = np.array([float(w) for w in toks])
lat = Lattice(mat)
if fabs(lat.a-lengths[0])/fabs(lat.a) > thresh or \
fabs(lat.b-lengths[1])/fabs(lat.b) > thresh or \
fabs(lat.c-lengths[2])/fabs(lat.c) > thresh or \
fabs(lat.alpha-angles[0])/fabs(lat.alpha) > thresh or \
fabs(lat.beta-angles[1])/fabs(lat.beta) > thresh or \
fabs(lat.gamma-angles[2])/fabs(lat.gamma) > thresh:
raise RuntimeError("cell parameters in header ("+str(lengths)+\
", "+str(angles)+") are not consistent with Cartesian"+\
" lattice vectors ("+str(lat.abc)+", "+\
str(lat.angles)+")")
# Ignore line w/ index 3.
sp = []
coords = []
for j in range(nsites):
m = re.match("\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
"([0-9\-\.]+)", lines[4+j].strip())
if m:
tmp_sp = m.group(1)
if use_cores and tmp_sp[len(tmp_sp)-2:] == "_s":
continue
if not use_cores and tmp_sp[len(tmp_sp)-2:] == "_c":
continue
if tmp_sp[len(tmp_sp)-2] == "_":
sp.append(tmp_sp[0:len(tmp_sp)-2])
else:
sp.append(tmp_sp)
coords.append([float(m.group(i)) for i in range(2, 5)])
return Xr(Structure(lat, sp, coords, coords_are_cartesian=True))
@staticmethod
def from_file(filename, use_cores=True, thresh=1.e-4):
"""
Reads an xr-formatted file to create an Xr object.
Args:
filename (str): name of file to read from.
use_cores (bool): use core positions and discard shell
positions if set to True (default). Otherwise,
use shell positions and discard core positions.
thresh (float): relative threshold for consistency check
between cell parameters (lengths and angles) from
header information and cell vectors, respectively.
Returns:
xr (Xr): Xr object corresponding to the input
file.
"""
with zopen(filename, "rt") as f:
return Xr.from_string(
f.read(), use_cores=use_cores,
thresh=thresh)
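# Minimal usage sketch; the file names are hypothetical placeholders, and the
# calls are restricted to the class API defined above: read a GULP-style xr
# file keeping only core positions, then write the structure back out.
if __name__ == "__main__":
    xr_in = Xr.from_file("relaxed.xr", use_cores=True)
    structure = xr_in.structure  # pymatgen Structure reconstructed from the file
    Xr(structure).write_file("relaxed_cores_only.xr")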
| mit | 3,129,614,051,673,794,000 | 37.231707 | 78 | 0.533971 | false |
PaulWay/insights-core | insights/parsers/tests/test_ceph_cmd_json_parsing.py | 2 | 17729 | from insights.parsers.ceph_cmd_json_parsing import CephOsdDump, CephOsdDf, CephS, CephECProfileGet, CephCfgInfo, \
CephHealthDetail, CephDfDetail, CephOsdTree
from insights.tests import context_wrap
CEPH_OSD_DUMP_INFO = """
{
"epoch": 210,
"fsid": "2734f9b5-2013-48c1-8e96-d31423444717",
"created": "2016-11-12 16:08:46.307206",
"modified": "2017-03-07 08:55:53.301911",
"flags": "sortbitwise",
"cluster_snapshot": "",
"pool_max": 12,
"max_osd": 8,
"pools": [
{
"pool": 0,
"pool_name": "rbd",
"flags": 1,
"flags_names": "hashpspool",
"type": 1,
"size": 3,
"min_size": 2,
"crush_ruleset": 0,
"object_hash": 2,
"pg_num": 256
}
]
}
""".strip()
CEPH_OSD_DF_INFO = """
{
"nodes": [
{
"id": 0,
"name": "osd.0",
"type": "osd",
"type_id": 0,
"crush_weight": 1.091095,
"depth": 2,
"reweight": 1.000000,
"kb": 1171539620,
"kb_used": 4048208,
"kb_avail": 1167491412,
"utilization": 0.345546,
"var": 1.189094,
"pgs": 945
}
],
"stray": [],
"summary": {
"total_kb": 8200777340,
"total_kb_used": 23831128,
"total_kb_avail": 8176946212,
"average_utilization": 0.290596,
"min_var": 0.803396,
"max_var": 1.189094,
"dev": 0.035843
}
}
""".strip()
CEPH_S_INFO = """
{
"health": {
},
"pgmap": {
"pgs_by_state": [
{
"state_name": "active+clean",
"count": 1800
}
],
"version": 314179,
"num_pgs": 1800,
"data_bytes": 7943926574,
"bytes_used": 24405610496,
"bytes_avail": 8373190385664,
"bytes_total": 8397595996160
},
"fsmap": {
"epoch": 1,
"by_rank": []
}
}
""".strip()
CEPH_DF_DETAIL_INFO = """
{
"stats": {
"total_bytes": 17113243648,
"total_used_bytes": 203120640,
"total_avail_bytes": 16910123008,
"total_objects": 0
},
"pools": [
{
"name": "rbd",
"id": 0,
"stats": {
"kb_used": 0,
"bytes_used": 0,
"max_avail": 999252180,
"objects": 0,
"dirty": 0,
"rd": 0,
"rd_bytes": 0,
"wr": 0,
"wr_bytes": 0,
"raw_bytes_used": 0
}
},
{
"name": "ecpool",
"id": 2,
"stats": {
"kb_used": 0,
"bytes_used": 0,
"max_avail": 1998504360,
"objects": 0,
"dirty": 0,
"rd": 0,
"rd_bytes": 0,
"wr": 0,
"wr_bytes": 0,
"raw_bytes_used": 0
}
}
]
}
""".strip()
CEPH_HEALTH_DETAIL_INFO = """
{
"health": {
},
"timechecks": {
"epoch": 4,
"round": 0,
"round_status": "finished"
},
"summary": [],
"overall_status": "HEALTH_OK",
"detail": []
}
""".strip()
CEPH_OSD_EC_PROFILE_GET = """
{
"k": "2",
"m": "1",
"plugin": "jerasure",
"technique": "reed_sol_van"
}
""".strip()
CEPHINFO = """
{
"name": "osd.1",
"cluster": "ceph",
"debug_none": "0\/5",
"heartbeat_interval": "5",
"heartbeat_file": "",
"heartbeat_inject_failure": "0",
"perf": "true",
"max_open_files": "131072",
"ms_type": "simple",
"ms_tcp_nodelay": "true",
"ms_tcp_rcvbuf": "0",
"ms_tcp_prefetch_max_size": "4096",
"ms_initial_backoff": "0.2",
"ms_max_backoff": "15",
"ms_crc_data": "true",
"ms_crc_header": "true",
"ms_die_on_bad_msg": "false",
"ms_die_on_unhandled_msg": "false",
"ms_die_on_old_message": "false",
"ms_die_on_skipped_message": "false",
"ms_dispatch_throttle_bytes": "104857600",
"ms_bind_ipv6": "false",
"ms_bind_port_min": "6800",
"ms_bind_port_max": "7300",
"ms_bind_retry_count": "3",
"ms_bind_retry_delay": "5"
}
""".strip()
CEPH_OSD_TREE = """
{
"nodes": [
{
"id": -1,
"name": "default",
"type": "root",
"type_id": 10,
"children": [
-5,
-4,
-3,
-2
]
},
{
"id": -2,
"name": "dhcp-192-56",
"type": "host",
"type_id": 1,
"children": []
},
{
"id": -3,
"name": "dhcp-192-104",
"type": "host",
"type_id": 1,
"children": []
},
{
"id": -4,
"name": "dhcp-192-67",
"type": "host",
"type_id": 1,
"children": []
},
{
"id": -5,
"name": "localhost",
"type": "host",
"type_id": 1,
"children": [
1,
3,
5,
2,
4,
0
]
},
{
"id": 0,
"name": "osd.0",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 4,
"name": "osd.4",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "down",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 2,
"name": "osd.2",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 5,
"name": "osd.5",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 3,
"name": "osd.3",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 1,
"name": "osd.1",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
}
],
"stray": []
}
""".strip()
class TestCephOsdDump():
def test_ceph_osd_dump(self):
result = CephOsdDump(context_wrap(CEPH_OSD_DUMP_INFO))
assert result.data == {
'pool_max': 12, 'max_osd': 8,
'created': '2016-11-12 16:08:46.307206',
'modified': '2017-03-07 08:55:53.301911',
'epoch': 210, 'flags': u'sortbitwise',
'cluster_snapshot': '',
'fsid': '2734f9b5-2013-48c1-8e96-d31423444717',
'pools': [
{
'pool_name': 'rbd', 'flags_names': 'hashpspool',
'min_size': 2, 'object_hash': 2, 'flags': 1,
'pg_num': 256, 'crush_ruleset': 0, 'type': 1,
'pool': 0, 'size': 3
}
]
}
assert result['pools'][0]['min_size'] == 2
class TestCephOsdDf():
def test_ceph_osd_df(self):
result = CephOsdDf(context_wrap(CEPH_OSD_DF_INFO))
assert result.data == {
"nodes": [
{
"id": 0,
"name": "osd.0",
"type": "osd",
"type_id": 0,
"crush_weight": 1.091095,
"depth": 2,
"reweight": 1.000000,
"kb": 1171539620,
"kb_used": 4048208,
"kb_avail": 1167491412,
"utilization": 0.345546,
"var": 1.189094,
"pgs": 945
}
],
"stray": [],
"summary": {
"total_kb": 8200777340,
"total_kb_used": 23831128,
"total_kb_avail": 8176946212,
"average_utilization": 0.290596,
"min_var": 0.803396,
"max_var": 1.189094,
"dev": 0.035843
}
}
assert result['nodes'][0]['pgs'] == 945
class TestCephS():
def test_ceph_s(self):
result = CephS(context_wrap(CEPH_S_INFO))
assert result.data == {
"health": {
},
"pgmap": {
"pgs_by_state": [
{
"state_name": "active+clean",
"count": 1800
}
],
"version": 314179,
"num_pgs": 1800,
"data_bytes": 7943926574,
"bytes_used": 24405610496,
"bytes_avail": 8373190385664,
"bytes_total": 8397595996160
},
"fsmap": {
"epoch": 1,
"by_rank": []
}
}
assert result['pgmap']['pgs_by_state'][0]['state_name'] == 'active+clean'
class TestCephECProfileGet():
def test_ceph_ec_profile_get(self):
result = CephECProfileGet(context_wrap(CEPH_OSD_EC_PROFILE_GET))
assert result.data == {
"k": "2",
"m": "1",
"plugin": "jerasure",
"technique": "reed_sol_van"
}
assert result['k'] == "2"
assert result['m'] == "1"
class TestCephCfgInfo():
def test_cephcfginfo(self):
result = CephCfgInfo(context_wrap(CEPHINFO))
assert result.data == {
'ms_tcp_nodelay': 'true', 'ms_max_backoff': '15',
'cluster': 'ceph', 'ms_dispatch_throttle_bytes': '104857600',
'debug_none': '0/5', 'ms_crc_data': 'true', 'perf': 'true',
'ms_tcp_prefetch_max_size': '4096', 'ms_die_on_bad_msg': 'false',
'ms_bind_port_max': '7300', 'ms_bind_port_min': '6800',
'ms_die_on_skipped_message': 'false', 'heartbeat_file': '',
'heartbeat_interval': '5', 'heartbeat_inject_failure': '0',
'ms_crc_header': 'true', 'max_open_files': '131072',
'ms_die_on_old_message': 'false', 'name': 'osd.1',
'ms_type': 'simple', 'ms_initial_backoff': '0.2',
'ms_bind_retry_delay': '5', 'ms_bind_ipv6': 'false',
'ms_die_on_unhandled_msg': 'false', 'ms_tcp_rcvbuf': '0',
'ms_bind_retry_count': '3'
}
assert result.max_open_files == '131072'
class TestCephHealthDetail():
def test_ceph_health_detail(self):
result = CephHealthDetail(context_wrap(CEPH_HEALTH_DETAIL_INFO))
assert result.data == {
"health": {
},
"timechecks": {
"epoch": 4,
"round": 0,
"round_status": "finished"
},
"summary": [],
"overall_status": "HEALTH_OK",
"detail": []
}
assert result['overall_status'] == 'HEALTH_OK'
class TestCephDfDetail():
def test_ceph_df_detail(self):
result = CephDfDetail(context_wrap(CEPH_DF_DETAIL_INFO))
assert result.data == {
"stats": {
"total_bytes": 17113243648,
"total_used_bytes": 203120640,
"total_avail_bytes": 16910123008,
"total_objects": 0
},
"pools": [
{
"name": "rbd",
"id": 0,
"stats": {
"kb_used": 0,
"bytes_used": 0,
"max_avail": 999252180,
"objects": 0,
"dirty": 0,
"rd": 0,
"rd_bytes": 0,
"wr": 0,
"wr_bytes": 0,
"raw_bytes_used": 0
}
},
{
"name": "ecpool",
"id": 2,
"stats": {
"kb_used": 0,
"bytes_used": 0,
"max_avail": 1998504360,
"objects": 0,
"dirty": 0,
"rd": 0,
"rd_bytes": 0,
"wr": 0,
"wr_bytes": 0,
"raw_bytes_used": 0
}
}
]
}
assert result['stats']['total_avail_bytes'] == 16910123008
class TestCephOsdTree():
def test_ceph_osd_tree(self):
result = CephOsdTree(context_wrap(CEPH_OSD_TREE))
assert result.data == {
"nodes": [
{
"id": -1,
"name": "default",
"type": "root",
"type_id": 10,
"children": [
-5,
-4,
-3,
-2
]
},
{
"id": -2,
"name": "dhcp-192-56",
"type": "host",
"type_id": 1,
"children": []
},
{
"id": -3,
"name": "dhcp-192-104",
"type": "host",
"type_id": 1,
"children": []
},
{
"id": -4,
"name": "dhcp-192-67",
"type": "host",
"type_id": 1,
"children": []
},
{
"id": -5,
"name": "localhost",
"type": "host",
"type_id": 1,
"children": [
1,
3,
5,
2,
4,
0
]
},
{
"id": 0,
"name": "osd.0",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 4,
"name": "osd.4",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "down",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 2,
"name": "osd.2",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 5,
"name": "osd.5",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 3,
"name": "osd.3",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
},
{
"id": 1,
"name": "osd.1",
"type": "osd",
"type_id": 0,
"crush_weight": 0.002991,
"depth": 2,
"exists": 1,
"status": "up",
"reweight": 1.000000,
"primary_affinity": 1.000000
}
],
"stray": []
}
assert len(result['nodes'][0]['children']) == 4
| apache-2.0 | -4,988,048,955,624,086,000 | 26.701563 | 114 | 0.345423 | false |
andhit-r/account-financial-tools | account_move_line_search_extension/__openerp__.py | 10 | 1376 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2013-2015 Noviat nv/sa (www.noviat.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Journal Items Search Extension',
'version': '8.0.0.6.0',
'license': 'AGPL-3',
'author': 'Noviat, Odoo Community Association (OCA)',
'category': 'Accounting & Finance',
'depends': ['account'],
'data': [
'account_view.xml',
'views/account.xml',
],
'qweb': [
'static/src/xml/account_move_line_search_extension.xml',
],
}
| agpl-3.0 | -3,842,046,906,003,299,300 | 36.189189 | 78 | 0.585756 | false |
robert-giaquinto/text-analysis | src/topic_model/journal_tokens.py | 1 | 1515 | from __future__ import division, print_function, absolute_import
import os
class JournalTokens(object):
"""
Iterable: on each iteration, return journal tokens in a list,
one list for each journal.
Process one cleaned journal at a time using generators, never
load the entire corpus into RAM.
Using an iterable so that memory isn't a concern, and
Gensim vocabulary and BOW building tools work well
with iterables.
"""
def __init__(self, filename):
self.filename = filename
def __iter__(self):
"""
        Input file is assumed to be tab separated, containing these fields:
        siteId userId journalId createdAt [space separated tokens from text]
Args: None, load data one at a time from self.filename
Return: None. Yield the tokens in a list, one journal at a time.
"""
with open(self.filename, "r") as fin:
for line in fin:
fields = line.replace("\n", "").split("\t")
tokens = fields[-1].split()
yield tokens
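def build_bow_corpus(filename):
    """
    Illustrative sketch: how the streaming iterable above is typically consumed
    by Gensim to build a vocabulary and a bag-of-words corpus without loading
    the whole corpus into RAM. Assumes gensim is installed; the file path and
    this helper's name are placeholders, not part of the original pipeline.
    """
    from gensim import corpora
    journals = JournalTokens(filename)
    dictionary = corpora.Dictionary(journals)  # first pass: build the vocabulary
    bow_corpus = (dictionary.doc2bow(tokens) for tokens in journals)  # second pass: BOW vectors
    return dictionary, bow_corpus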
def main():
"""
Simple example of how to use this class
"""
jt = JournalTokens(filename = '/home/srivbane/shared/caringbridge/data/dev/clean_journals/cleaned_journals.txt')
print("Here are top the top lines of the cleaned journals in the dev folder")
for i, tokens in enumerate(jt):
if i > 5:
break
print(', '.join(sorted(tokens)))
if __name__ == "__main__":
main()
| mit | 3,059,249,082,219,789,000 | 31.234043 | 116 | 0.611881 | false |
gangadhar-kadam/latestchurcherp | erpnext/selling/doctype/sales_order/test_sales_order.py | 5 | 8703 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, add_days
import frappe.permissions
import unittest
from erpnext.selling.doctype.sales_order.sales_order \
import make_material_request, make_delivery_note, make_sales_invoice
class TestSalesOrder(unittest.TestCase):
def tearDown(self):
frappe.set_user("Administrator")
def test_make_material_request(self):
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_material_request, so.name)
so.submit()
mr = make_material_request(so.name)
self.assertEquals(mr.material_request_type, "Purchase")
self.assertEquals(len(mr.get("items")), len(so.get("items")))
def test_make_delivery_note(self):
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_delivery_note, so.name)
so.submit()
dn = make_delivery_note(so.name)
self.assertEquals(dn.doctype, "Delivery Note")
self.assertEquals(len(dn.get("items")), len(so.get("items")))
def test_make_sales_invoice(self):
so = make_sales_order(do_not_submit=True)
self.assertRaises(frappe.ValidationError, make_sales_invoice, so.name)
so.submit()
si = make_sales_invoice(so.name)
self.assertEquals(len(si.get("items")), len(so.get("items")))
self.assertEquals(len(si.get("items")), 1)
si.insert()
si.submit()
si1 = make_sales_invoice(so.name)
self.assertEquals(len(si1.get("items")), 0)
def test_update_qty(self):
so = make_sales_order()
create_dn_against_so(so.name, 6)
so.load_from_db()
self.assertEquals(so.get("items")[0].delivered_qty, 6)
# Check delivered_qty after make_sales_invoice without update_stock checked
si1 = make_sales_invoice(so.name)
si1.get("items")[0].qty = 6
si1.insert()
si1.submit()
so.load_from_db()
self.assertEquals(so.get("items")[0].delivered_qty, 6)
# Check delivered_qty after make_sales_invoice with update_stock checked
si2 = make_sales_invoice(so.name)
si2.set("update_stock", 1)
si2.get("items")[0].qty = 3
si2.insert()
si2.submit()
so.load_from_db()
self.assertEquals(so.get("items")[0].delivered_qty, 9)
def test_reserved_qty_for_partial_delivery(self):
existing_reserved_qty = get_reserved_qty()
so = make_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
dn = create_dn_against_so(so.name)
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 5)
# stop so
so.load_from_db()
so.stop_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
# unstop so
so.load_from_db()
so.unstop_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 5)
dn.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
# cancel
so.load_from_db()
so.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
def test_reserved_qty_for_over_delivery(self):
# set over-delivery tolerance
frappe.db.set_value('Item', "_Test Item", 'tolerance', 50)
existing_reserved_qty = get_reserved_qty()
so = make_sales_order()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
dn = create_dn_against_so(so.name, 15)
self.assertEqual(get_reserved_qty(), existing_reserved_qty)
dn.cancel()
self.assertEqual(get_reserved_qty(), existing_reserved_qty + 10)
def test_reserved_qty_for_partial_delivery_with_packing_list(self):
existing_reserved_qty_item1 = get_reserved_qty("_Test Item")
existing_reserved_qty_item2 = get_reserved_qty("_Test Item Home Desktop 100")
so = make_sales_order(item_code="_Test Sales BOM Item")
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
dn = create_dn_against_so(so.name)
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 25)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 10)
# stop so
so.load_from_db()
so.stop_sales_order()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"), existing_reserved_qty_item2)
# unstop so
so.load_from_db()
so.unstop_sales_order()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 25)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 10)
dn.cancel()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
so.load_from_db()
so.cancel()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"), existing_reserved_qty_item2)
def test_reserved_qty_for_over_delivery_with_packing_list(self):
# set over-delivery tolerance
frappe.db.set_value('Item', "_Test Sales BOM Item", 'tolerance', 50)
existing_reserved_qty_item1 = get_reserved_qty("_Test Item")
existing_reserved_qty_item2 = get_reserved_qty("_Test Item Home Desktop 100")
so = make_sales_order(item_code="_Test Sales BOM Item")
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
dn = create_dn_against_so(so.name, 15)
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2)
dn.cancel()
self.assertEqual(get_reserved_qty("_Test Item"), existing_reserved_qty_item1 + 50)
self.assertEqual(get_reserved_qty("_Test Item Home Desktop 100"),
existing_reserved_qty_item2 + 20)
def test_warehouse_user(self):
frappe.permissions.add_user_permission("Warehouse", "_Test Warehouse 1 - _TC", "[email protected]")
frappe.permissions.add_user_permission("Warehouse", "_Test Warehouse 2 - _TC1", "[email protected]")
frappe.permissions.add_user_permission("Company", "_Test Company 1", "[email protected]")
test_user = frappe.get_doc("User", "[email protected]")
test_user.add_roles("Sales User", "Material User")
test_user.remove_roles("Sales Manager")
test_user_2 = frappe.get_doc("User", "[email protected]")
test_user_2.add_roles("Sales User", "Material User")
test_user_2.remove_roles("Sales Manager")
frappe.set_user("[email protected]")
so = make_sales_order(company="_Test Company 1",
warehouse="_Test Warehouse 2 - _TC1", do_not_save=True)
so.conversion_rate = 0.02
so.plc_conversion_rate = 0.02
self.assertRaises(frappe.PermissionError, so.insert)
frappe.set_user("[email protected]")
so.insert()
frappe.permissions.remove_user_permission("Warehouse", "_Test Warehouse 1 - _TC", "[email protected]")
frappe.permissions.remove_user_permission("Warehouse", "_Test Warehouse 2 - _TC1", "[email protected]")
frappe.permissions.remove_user_permission("Company", "_Test Company 1", "[email protected]")
def test_block_delivery_note_against_cancelled_sales_order(self):
so = make_sales_order()
dn = make_delivery_note(so.name)
dn.insert()
so.cancel()
self.assertRaises(frappe.CancelledLinkError, dn.submit)
def make_sales_order(**args):
so = frappe.new_doc("Sales Order")
args = frappe._dict(args)
if args.transaction_date:
so.transaction_date = args.transaction_date
so.company = args.company or "_Test Company"
so.customer = args.customer or "_Test Customer"
so.delivery_date = add_days(so.transaction_date, 10)
so.append("items", {
"item_code": args.item or args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": args.qty or 10,
"rate": args.rate or 100,
"conversion_factor": 1.0,
})
if not args.do_not_save:
so.insert()
if not args.do_not_submit:
so.submit()
return so
def create_dn_against_so(so, delivered_qty=0):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
dn = make_delivery_note(so)
dn.get("items")[0].qty = delivered_qty or 5
dn.insert()
dn.submit()
return dn
def get_reserved_qty(item_code="_Test Item", warehouse="_Test Warehouse - _TC"):
return flt(frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
"reserved_qty"))
test_dependencies = ["Currency Exchange"] | agpl-3.0 | 238,892,535,877,050,530 | 31.969697 | 105 | 0.708721 | false |
nojhan/weboob-devel | weboob/tools/application/results.py | 7 | 6048 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities import UserError
__all__ = ['ResultsCondition', 'ResultsConditionError']
class IResultsCondition(object):
def is_valid(self, obj):
raise NotImplementedError()
class ResultsConditionError(UserError):
pass
class Condition(object):
def __init__(self, left, op, right):
self.left = left # Field of the object to test
self.op = op
self.right = right
def is_egal(left, right):
return left == right
def is_notegal(left, right):
return left != right
def is_sup(left, right):
return left < right
def is_inf(left, right):
return left > right
def is_in(left, right):
return left in right
functions = {'!=': is_notegal, '=': is_egal, '>': is_sup, '<': is_inf, '|': is_in}
class ResultsCondition(IResultsCondition):
condition_str = None
# Supported operators
# =, !=, <, > for float/int/decimal
# =, != for strings
    # We build a list of lists. Return True if every condition in one of the lists is TRUE.
def __init__(self, condition_str):
self.limit = None
or_list = []
_condition_str = condition_str.split(' LIMIT ')
if len(_condition_str) == 2:
try:
self.limit = int(_condition_str[1])
except ValueError:
raise ResultsConditionError(u'Syntax error in the condition expression, please check documentation')
            condition_str = _condition_str[0]
for _or in condition_str.split(' OR '):
and_list = []
for _and in _or.split(' AND '):
operator = None
for op in ['!=', '=', '>', '<', '|']:
if op in _and:
operator = op
break
if operator is None:
raise ResultsConditionError(u'Could not find a valid operator in sub-expression "%s". Protect the complete condition expression with quotes, or read the documentation in the man manual.' % _and)
try:
l, r = _and.split(operator)
except ValueError:
raise ResultsConditionError(u'Syntax error in the condition expression, please check documentation')
and_list.append(Condition(l, operator, r))
or_list.append(and_list)
self.condition = or_list
self.condition_str = condition_str
def is_valid(self, obj):
import weboob.tools.date as date_utils
import re
from datetime import date, datetime, timedelta
d = obj.to_dict()
        # We evaluate all members of a list at each iteration.
for _or in self.condition:
myeval = True
for condition in _or:
if condition.left in d:
# in the case of id, test id@backend and id
if condition.left == 'id':
tocompare = condition.right
evalfullid = functions[condition.op](tocompare, d['id'])
evalid = functions[condition.op](tocompare, obj.id)
myeval = evalfullid or evalid
else:
                    # We have to change the type of v, which is always given as a string by the application
typed = type(d[condition.left])
try:
if isinstance(d[condition.left], date_utils.date):
tocompare = date(*[int(x) for x in condition.right.split('-')])
elif isinstance(d[condition.left], date_utils.datetime):
splitted_datetime = condition.right.split(' ')
tocompare = datetime(*([int(x) for x in splitted_datetime[0].split('-')] +
[int(x) for x in splitted_datetime[1].split(':')]))
elif isinstance(d[condition.left], timedelta):
time_dict = re.match('^\s*((?P<hours>\d+)\s*h)?\s*((?P<minutes>\d+)\s*m)?\s*((?P<seconds>\d+)\s*s)?\s*$',
condition.right).groupdict()
tocompare = timedelta(seconds=int(time_dict['seconds'] or "0"),
minutes=int(time_dict['minutes'] or "0"),
hours=int(time_dict['hours'] or "0"))
else:
tocompare = typed(condition.right)
myeval = functions[condition.op](tocompare, d[condition.left])
except:
myeval = False
else:
raise ResultsConditionError(u'Field "%s" is not valid.' % condition.left)
# Do not try all AND conditions if one is false
if not myeval:
break
# Return True at the first OR valid condition
if myeval:
return True
# If we are here, all OR conditions are False
return False
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.condition_str
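# Illustrative sketch of the expression syntax parsed above; the field names
# ('author', 'year', 'title') are placeholders rather than real capability
# fields. 'AND' binds tighter than 'OR', '|' tests membership, and 'LIMIT n'
# caps the number of results.
def _example_condition():
    cond = ResultsCondition('author=bob AND year>2010 OR title|python LIMIT 10')
    # cond.limit == 10; cond.condition is a list of OR-groups, each a list of
    # the AND-ed Condition objects for that group.
    return cond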
| agpl-3.0 | 6,004,166,350,627,672,000 | 38.789474 | 214 | 0.535714 | false |
muraliselva10/designate | contrib/tempest/dns_tests/admin/test_servers.py | 8 | 2840 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.dns import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class ServersAdminTestJSON(base.BaseDnsAdminTest):
"""
Tests Servers API Create, Get, List and Delete
that require admin privileges
"""
@classmethod
def setUpClass(cls):
super(ServersAdminTestJSON, cls).setUpClass()
cls.client = cls.os_adm.dns_servers_client
cls.setup_servers = list()
for i in range(2):
name = data_utils.rand_name('dns-server') + '.com.'
_, server = cls.client.create_server(name)
cls.setup_servers.append(server)
@classmethod
def tearDownClass(cls):
for server in cls.setup_servers:
cls.client.delete_server(server['id'])
super(ServersAdminTestJSON, cls).tearDownClass()
def _delete_server(self, server_id):
self.client.delete_server(server_id)
self.assertRaises(exceptions.NotFound,
self.client.get_server, server_id)
@test.attr(type='gate')
def test_list_servers(self):
# Get a list of servers
_, servers = self.client.list_servers()
# Verify servers created in setup class are in the list
for server in self.setup_servers:
self.assertIn(server['id'], map(lambda x: x['id'], servers))
@test.attr(type='smoke')
def test_create_update_get_delete_server(self):
# Create Dns Server
s_name1 = data_utils.rand_name('dns-server') + '.com.'
_, server = self.client.create_server(s_name1)
self.addCleanup(self._delete_server, server['id'])
self.assertEqual(s_name1, server['name'])
self.assertIsNotNone(server['id'])
# Update Dns Server
s_name2 = data_utils.rand_name('update-dns-server') + '.com.'
_, update_server = self.client.update_server(server['id'],
name=s_name2)
self.assertEqual(s_name2, update_server['name'])
# Get the details of Server
_, get_server = self.client.get_server(server['id'])
self.assertEqual(update_server['name'], get_server['name'])
| apache-2.0 | -4,746,614,242,254,964,000 | 39 | 78 | 0.641549 | false |
mozilla/moztrap | tests/model/test_mtmodel.py | 5 | 23252 | """
Tests for ``MTModel`` and related classes.
These tests use the ``Product`` model (and ``Suite`` for cascade-delete tests),
as it's a simple model inherited from ``MTModel``, and this avoids the need for
a test-only model.
"""
import datetime
from mock import patch
from tests import case
class MTModelTestCase(case.DBTestCase):
"""Common base class for MTModel tests."""
def setUp(self):
"""Creates ``self.user`` for use by all tests."""
self.user = self.F.UserFactory.create()
class UserDeleteTest(MTModelTestCase):
"""Tests for deleting users, and the effect on MTModels."""
def test_delete_created_by_sets_null(self):
"""Deleting the created_by user sets created_by to None."""
u = self.F.UserFactory()
p = self.F.ProductFactory(user=u)
u.delete()
self.assertEqual(self.refresh(p).created_by, None)
def test_delete_modified_by_sets_null(self):
"""Deleting the modified_by user sets modified_by to None."""
p = self.F.ProductFactory()
u = self.F.UserFactory()
p.save(user=u)
u.delete()
self.assertEqual(self.refresh(p).modified_by, None)
def test_delete_deleted_by_sets_null(self):
"""Deleting the deleted_by user sets deleted_by to None."""
p = self.F.ProductFactory()
u = self.F.UserFactory()
p.delete(user=u)
u.delete()
self.assertEqual(self.refresh(p).deleted_by, None)
class MTModelMockNowTestCase(MTModelTestCase):
"""Base class for MTModel tests that need "now" mocked."""
def setUp(self):
"""Mocks datetime.utcnow() with datetime in self.utcnow."""
super(MTModelMockNowTestCase, self).setUp()
self.utcnow = datetime.datetime(2011, 12, 13, 22, 39)
patcher = patch("moztrap.model.mtmodel.datetime")
self.mock_utcnow = patcher.start().datetime.utcnow
self.mock_utcnow.return_value = self.utcnow
self.addCleanup(patcher.stop)
class CreateTest(MTModelMockNowTestCase):
"""Tests for (created/modified)_(on/by) when using Model.objects.create."""
def test_created_by_none(self):
"""If ``user`` is not given to create(), created_by is None."""
p = self.model.Product.objects.create(name="Foo")
self.assertEqual(p.created_by, None)
def test_created_by(self):
"""If ``user`` is given to create(), created_by is set."""
p = self.model.Product.objects.create(name="Foo", user=self.user)
self.assertEqual(p.created_by, self.user)
def test_new_modified_by_none(self):
"""If ``user`` is not given to create(), modified_by is None."""
p = self.model.Product.objects.create(name="Foo")
self.assertEqual(p.modified_by, None)
def test_new_modified_by(self):
"""If ``user`` is given to create(), modified_by is set."""
p = self.model.Product.objects.create(name="Foo", user=self.user)
self.assertEqual(p.modified_by, self.user)
def test_created_on(self):
"""create() method sets created_on."""
p = self.model.Product.objects.create(name="Foo")
self.assertEqual(p.created_on, self.utcnow)
def test_new_modified_on(self):
"""create() method sets modified_on."""
p = self.model.Product.objects.create(name="Foo")
self.assertEqual(p.modified_on, self.utcnow)
class SaveTest(MTModelMockNowTestCase):
"""Tests for (created/modified)_(on/by) when using instance.save."""
def test_created_by_none(self):
"""If ``user`` is not given to new obj save(), created_by is None."""
p = self.model.Product(name="Foo")
p.save()
self.assertEqual(p.created_by, None)
def test_created_by(self):
"""If ``user`` is given to new obj save(), created_by is set."""
p = self.model.Product(name="Foo")
p.save(user=self.user)
self.assertEqual(p.created_by, self.user)
def test_new_modified_by_none(self):
"""If ``user`` is not given to new obj save(), modified_by is None."""
p = self.model.Product(name="Foo")
p.save()
self.assertEqual(p.modified_by, None)
def test_new_modified_by(self):
"""If ``user`` is given to new obj save(), modified_by is set."""
p = self.model.Product(name="Foo")
p.save(user=self.user)
self.assertEqual(p.modified_by, self.user)
def test_created_on(self):
"""save() method sets created_on."""
p = self.model.Product(name="Foo")
p.save()
self.assertEqual(p.created_on, self.utcnow)
def test_new_modified_on(self):
"""save() method sets modified_on for new object."""
p = self.model.Product(name="Foo")
p.save()
self.assertEqual(p.modified_on, self.utcnow)
def test_modified_by_none(self):
"""If ``user`` is not given to save(), modified_by is set to None."""
p = self.model.Product.objects.create(name="Foo", user=self.user)
p.save()
self.assertEqual(p.modified_by, None)
def test_modified_by(self):
"""If ``user`` is given to save(), modified_by is set."""
p = self.model.Product.objects.create(name="Foo")
p.save(user=self.user)
self.assertEqual(p.modified_by, self.user)
def test_modified_on(self):
"""save() method sets modified_on for existing object."""
p = self.model.Product.objects.create(name="Foo")
new_now = datetime.datetime(2012, 1, 1, 12, 0)
self.mock_utcnow.return_value = new_now
p.save()
self.assertEqual(p.modified_on, new_now)
def test_notrack_modified_on(self):
"""If notrack=True, doesn't update modified_on."""
self.mock_utcnow.return_value = datetime.datetime(2012, 1, 1)
p = self.model.Product.objects.create(name="Foo")
self.mock_utcnow.return_value = datetime.datetime(2012, 1, 2)
p.save(notrack=True)
self.assertEqual(
self.refresh(p).modified_on, datetime.datetime(2012, 1, 1))
def test_notrack_modified_by(self):
"""If notrack=True, doesn't update modified_by."""
p = self.model.Product.objects.create(name="Foo", user=self.user)
p.save(notrack=True)
self.assertEqual(self.refresh(p).modified_by, self.user)
class UpdateTest(MTModelMockNowTestCase):
"""Tests for modified_(by/on) when using queryset.update."""
def test_modified_by_none(self):
"""queryset update() sets modified_by to None if not given user."""
p = self.model.Product.objects.create(name="Foo", user=self.user)
self.model.Product.objects.update(name="Bar")
self.assertEqual(self.refresh(p).modified_by, None)
def test_modified_by(self):
"""queryset update() sets modified_by if given user."""
p = self.model.Product.objects.create(name="Foo")
self.model.Product.objects.update(name="Bar", user=self.user)
self.assertEqual(self.refresh(p).modified_by, self.user)
def test_modified_on(self):
"""queryset update() sets modified_on."""
p = self.model.Product.objects.create(name="Foo")
new_now = datetime.datetime(2012, 1, 1, 12, 0)
self.mock_utcnow.return_value = new_now
self.model.Product.objects.update(name="Bar")
self.assertEqual(self.refresh(p).modified_on, new_now)
def test_notrack_modified_on(self):
"""If notrack=True, doesn't update modified_on."""
self.mock_utcnow.return_value = datetime.datetime(2012, 1, 1)
p = self.model.Product.objects.create(name="Foo")
self.mock_utcnow.return_value = datetime.datetime(2012, 1, 2)
self.model.Product.objects.update(name="bar", notrack=True)
self.assertEqual(self.refresh(p).modified_on, datetime.datetime(2012, 1, 1))
def test_notrack_modified_by(self):
"""If notrack=True, doesn't update modified_by."""
p = self.model.Product.objects.create(name="Foo", user=self.user)
self.model.Product.objects.update(name="bar", notrack=True)
self.assertEqual(self.refresh(p).modified_by, self.user)
class DeleteTest(MTModelMockNowTestCase):
"""Tests for deleted_(by/on) when using instance.delete or qs.delete."""
def test_queryset_deleted_by_none(self):
"""queryset delete() sets deleted_by to None if not given user."""
p = self.F.ProductFactory.create()
self.model.Product.objects.all().delete()
self.assertEqual(self.refresh(p).deleted_by, None)
def test_queryset_deleted_by(self):
"""queryset delete() sets deleted_by if given user."""
p = self.F.ProductFactory.create()
self.model.Product.objects.all().delete(user=self.user)
self.assertEqual(self.refresh(p).deleted_by, self.user)
def test_queryset_deleted_on(self):
"""queryset delete() sets deleted_on."""
p = self.F.ProductFactory.create()
self.model.Product.objects.all().delete()
self.assertEqual(self.refresh(p).deleted_on, self.utcnow)
def test_deleted_by_none(self):
"""delete() sets deleted_by to None if not given user."""
p = self.F.ProductFactory.create()
p.delete()
self.assertEqual(self.refresh(p).deleted_by, None)
def test_deleted_by(self):
"""delete() sets deleted_by if given user."""
p = self.F.ProductFactory.create()
p.delete(user=self.user)
self.assertEqual(self.refresh(p).deleted_by, self.user)
def test_deleted_on(self):
"""delete() sets deleted_on."""
p = self.F.ProductFactory.create()
p.delete()
self.assertEqual(self.refresh(p).deleted_on, self.utcnow)
class HardDeleteTest(case.DBTestCase):
"""Tests for deletion with permanent=True."""
def test_instance(self):
"""Can hard-delete an instance with permanent=True."""
p = self.F.ProductFactory.create()
p.delete(permanent=True)
self.assertEqual(self.model.Product._base_manager.count(), 0)
def test_queryset(self):
"""Can hard-delete a queryset with permanent=True."""
self.F.ProductFactory.create()
self.model.Product.objects.all().delete(permanent=True)
self.assertEqual(self.model.Product._base_manager.count(), 0)
class CascadeDeleteTest(MTModelTestCase):
"""Tests for cascading soft-delete."""
def test_queryset_deleted_by_none(self):
"""queryset delete() sets deleted_by None if no user on cascade."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
self.model.Product.objects.all().delete()
self.assertEqual(self.refresh(s).deleted_by, None)
def test_queryset_deleted_by(self):
"""queryset delete() sets deleted_by to given user on cascade."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
self.model.Product.objects.all().delete(user=self.user)
self.assertEqual(self.refresh(s).deleted_by, self.user)
def test_queryset_deleted_on(self):
"""qs delete() sets deleted_on to same time as parent on cascade."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
self.model.Product.objects.all().delete()
p = self.refresh(p)
s = self.refresh(s)
self.assertIsNot(p.deleted_on, None)
self.assertEqual(s.deleted_on, p.deleted_on)
def test_deleted_by_none(self):
"""delete() sets deleted_by None if no user on cascade."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
p.delete()
self.assertEqual(self.refresh(s).deleted_by, None)
def test_deleted_by(self):
"""delete() sets deleted_by to given user on cascade."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
p.delete(user=self.user)
self.assertEqual(self.refresh(s).deleted_by, self.user)
def test_deleted_on(self):
"""delete() sets deleted_on to same time as parent on cascade."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
p.delete()
p = self.refresh(p)
s = self.refresh(s)
self.assertIsNot(p.deleted_on, None)
self.assertEqual(s.deleted_on, p.deleted_on)
def test_no_cascade_redelete(self):
"""cascade delete won't update deleted-on for previously deleted."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
# need to patch utcnow because MySQL doesn't give us better than
# one-second resolution on datetimes.
with patch("moztrap.model.mtmodel.datetime") as mock_dt:
mock_dt.datetime.utcnow.return_value = datetime.datetime(
2011, 12, 13, 10, 23, 58)
s.delete()
# ... a day later...
mock_dt.datetime.utcnow.return_value = datetime.datetime(
2011, 12, 14, 9, 18, 22)
p.delete()
self.assertNotEqual(
self.refresh(s).deleted_on, self.refresh(p).deleted_on)
class UndeleteMixin(object):
"""Utility assertions mixin for undelete tests."""
def assertNotDeleted(self, obj):
self.assertEqual(obj.deleted_on, None)
self.assertEqual(obj.deleted_by, None)
class UndeleteTest(UndeleteMixin, MTModelTestCase):
"""Tests for undelete using instance.undelete or qs.undelete."""
def test_instance(self):
"""instance.undelete() undeletes an instance."""
p = self.F.ProductFactory.create()
p.delete()
p.undelete()
self.assertNotDeleted(p)
def test_queryset(self):
"""qs.undelete() undeletes all objects in the queryset."""
p = self.F.ProductFactory.create()
p.delete()
self.model.Product.everything.all().undelete()
self.assertNotDeleted(self.refresh(p))
class CascadeUndeleteTest(UndeleteMixin, MTModelTestCase):
"""Tests for cascading undelete."""
def test_instance(self):
"""Undeleting an instance also undeletes cascade-deleted dependents."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
p.delete()
p = self.refresh(p)
p.undelete()
self.assertNotDeleted(self.refresh(s))
def test_queryset(self):
"""Undeleting a queryset also undeletes cascade-deleted dependents."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
p.delete()
self.model.Product.everything.all().undelete()
self.assertNotDeleted(self.refresh(s))
def test_cascade_limited(self):
"""Undelete only cascades to objs cascade-deleted with that object."""
p = self.F.ProductFactory.create()
s = self.F.SuiteFactory.create(product=p)
# need to patch utcnow because MySQL doesn't give us better than
# one-second resolution on datetimes.
with patch("moztrap.model.mtmodel.datetime") as mock_dt:
mock_dt.datetime.utcnow.return_value = datetime.datetime(
2011, 12, 13, 10, 23, 58)
s.delete()
# ... a day later ...
mock_dt.datetime.utcnow.return_value = datetime.datetime(
2011, 12, 14, 9, 18, 22)
p.delete()
self.refresh(p).undelete()
self.assertIsNot(self.refresh(s).deleted_on, None)
class CloneTest(UndeleteMixin, MTModelTestCase):
"""Tests for cloning."""
def test_cascade_non_m2m_or_reverse_fk(self):
"""Cascade-cloning an attr that isn't M2M or rev FK raises an error."""
p = self.F.ProductFactory.create()
with self.assertRaises(ValueError):
p.clone(cascade=["name"])
@patch("moztrap.model.mtmodel.datetime")
def test_updates_created_on(self, mock_dt):
"""Cloned objects get a new created-on timestamp."""
mock_dt.datetime.utcnow.return_value = datetime.datetime(
2012, 1, 30)
p = self.F.ProductFactory.create()
cloned_on = datetime.datetime(2012, 1, 31)
mock_dt.datetime.utcnow.return_value = cloned_on
new = p.clone()
self.assertEqual(new.created_on, cloned_on)
def test_updates_created_by(self):
"""Cloned objects get a new created-by; the cloning user."""
u1 = self.F.UserFactory.create()
p = self.F.ProductFactory.create(user=u1)
u2 = self.F.UserFactory.create()
new = p.clone(user=u2)
self.assertEqual(new.created_by, u2)
@patch("moztrap.model.mtmodel.datetime")
def test_updates_modified_on(self, mock_dt):
"""Cloned objects get a new modified-on timestamp."""
mock_dt.datetime.utcnow.return_value = datetime.datetime(
2012, 1, 30)
p = self.F.ProductFactory.create()
cloned_on = datetime.datetime(2012, 1, 31)
mock_dt.datetime.utcnow.return_value = cloned_on
new = p.clone()
self.assertEqual(new.modified_on, cloned_on)
def test_updates_modified_by(self):
"""Cloned objects get a new modified-by; the cloning user."""
u1 = self.F.UserFactory.create()
p = self.F.ProductFactory.create(user=u1)
u2 = self.F.UserFactory.create()
new = p.clone(user=u2)
self.assertEqual(new.modified_by, u2)
class MTManagerTest(MTModelTestCase):
"""Tests for MTManager."""
def test_objects_doesnt_include_deleted(self):
"""``objects`` manager doesn't include deleted objects."""
p1 = self.F.ProductFactory.create()
p2 = self.F.ProductFactory.create()
p2.delete()
self.assertEqual(set(self.model.Product.objects.all()), set([p1]))
def test_everything_does_include_deleted(self):
"""``everything`` manager does include deleted objects."""
p1 = self.F.ProductFactory.create()
p2 = self.F.ProductFactory.create()
p2.delete()
self.assertEqual(
set(self.model.Product.everything.all()), set([p1, p2]))
def test_everything_is_default_manager(self):
"""``everything`` manager is the default manager."""
self.assertIs(
self.model.Product._default_manager, self.model.Product.everything)
def test_related_managers_dont_include_deleted(self):
"""Related managers don't include deleted objects."""
pv1 = self.F.ProductVersionFactory.create(version="2.0")
pv2 = self.F.ProductVersionFactory.create(product=pv1.product)
pv2.delete()
self.assertEqual(set(pv1.product.versions.all()), set([pv1]))
class TeamModelTest(case.DBTestCase):
"""Tests for TeamModel base class."""
@property
def TeamModel(self):
from moztrap.model.mtmodel import TeamModel
return TeamModel
def test_parent(self):
"""parent property is None in base class."""
t = self.TeamModel()
self.assertIsNone(t.parent)
class DraftStatusModelTest(case.DBTestCase):
"""
Tests for DraftStatusModel base class.
The tests use Run, a DraftStatusModel subclass, to avoid the need for a
test-only model.
"""
def test_activate(self):
"""Test the activate method."""
r = self.F.RunFactory.create(status="draft")
r.activate()
self.assertEqual(self.refresh(r).status, "active")
def test_draft(self):
"""Test the draft method."""
r = self.F.RunFactory.create(status="active")
r.draft()
self.assertEqual(self.refresh(r).status, "draft")
def test_deactivate(self):
"""Test the deactivate method."""
r = self.F.RunFactory.create(status="active")
r.deactivate()
self.assertEqual(self.refresh(r).status, "disabled")
def test_activate_by_user(self):
"""Test the activate method with a user."""
r = self.F.RunFactory.create(status="draft")
u = self.F.UserFactory.create()
r.activate(user=u)
self.assertEqual(self.refresh(r).modified_by, u)
def test_draft_by_user(self):
"""Test the draft method with a user."""
r = self.F.RunFactory.create(status="active")
u = self.F.UserFactory.create()
r.draft(user=u)
self.assertEqual(self.refresh(r).modified_by, u)
def test_deactivate_by_user(self):
"""Test the deactivate method with a user."""
r = self.F.RunFactory.create(status="active")
u = self.F.UserFactory.create()
r.deactivate(user=u)
self.assertEqual(self.refresh(r).modified_by, u)
class NotDeletedCountTest(case.DBTestCase):
"""Tests for NotDeletedCount aggregate."""
@property
def NotDeletedCount(self):
"""The aggregate class under test."""
from moztrap.model.mtmodel import NotDeletedCount
return NotDeletedCount
def test_counts_not_deleted(self):
"""Counts only not-deleted related objects."""
pv = self.F.ProductVersionFactory.create()
self.F.ProductVersionFactory.create(product=pv.product)
pv.delete()
p = self.model.Product.objects.annotate(
num_versions=self.NotDeletedCount("versions")).get()
self.assertEqual(p.num_versions, 1)
def test_aggregate_annotation(self):
"""
Works when aggregating over an annotation.
This is a bit of an artificially-constructed test in order to cover a
certain edge case in the aggregation code.
"""
from django.db.models import Count
pv1 = self.F.ProductVersionFactory.create()
self.F.ProductVersionFactory.create()
pv1.product.delete()
# In this case we are intentionally selecting all products, and
# counting all versions (even deleted ones) in the initial num_versions
# annotation. What we want to test is that the final aggregation counts
# only not-deleted products.
res = self.model.Product.everything.annotate(
num_versions=Count("versions")).aggregate(
products_with_versions=self.NotDeletedCount("num_versions"))
self.assertEqual(res, {"products_with_versions": 1})
class OptimisticLockingTest(case.DBTestCase):
"""Test optimistic locking to avoid silent overwrite on concurrent edits."""
def test_concurrency_error(self):
"""Save raises ConcurrencyError if version does not match the DB."""
p = self.F.ProductFactory()
p2 = self.model.Product.objects.get()
p2.name = "Name One"
p2.save()
p.name = "Name Two"
with self.assertRaises(self.model.ConcurrencyError):
p.save()
def test_queryset_update_increments_version(self):
"""Update via queryset increments version in database, not just save."""
p = self.F.ProductFactory()
self.model.Product.objects.update(name="Name One")
p.name = "Name Two"
with self.assertRaises(self.model.ConcurrencyError):
p.save()
| bsd-2-clause | -5,078,902,280,909,319,000 | 29.434555 | 84 | 0.628462 | false |
SmartInfrastructures/fuel-web-dev | nailgun/nailgun/api/v1/handlers/role.py | 1 | 4984 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from nailgun.api.v1.handlers import base
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators.role import RoleValidator
from nailgun.errors import errors
from nailgun import objects
from nailgun.objects.serializers.role import RoleSerializer
class RoleHandler(base.SingleHandler):
validator = RoleValidator
def get_role_or_404(self, release_id, role_name):
role = self.single.get_by_release_id_role_name(release_id, role_name)
if role is None:
raise self.http(
404,
u'Role {name} for release {release} is not found'.format(
release=release_id, name=role_name))
return role
@content
def GET(self, release_id, role_name):
""":http:
* 200 (OK)
* 404 (no such object found)
"""
release = self.get_object_or_404(objects.Release, release_id)
return RoleSerializer.serialize_from_release(release, role_name)
@content
def PUT(self, release_id, role_name):
""":http:
* 200 (OK)
* 404 (no such object found)
"""
release = self.get_object_or_404(objects.Release, release_id)
data = self.checked_data(
self.validator.validate_update, instance=release)
objects.Release.update_role(release, data)
return RoleSerializer.serialize_from_release(release, role_name)
def DELETE(self, release_id, role_name):
""":http:
* 204 (object successfully deleted)
* 400 (cannot delete object)
* 404 (no such object found)
"""
release = self.get_object_or_404(objects.Release, release_id)
try:
self.validator.validate_delete(release, role_name)
except errors.CannotDelete as exc:
raise self.http(400, exc.message)
objects.Release.remove_role(release, role_name)
raise self.http(204)
class RoleCollectionHandler(base.CollectionHandler):
validator = RoleValidator
@content
def POST(self, release_id):
""":http:
* 201 (object successfully created)
* 400 (invalid object data specified)
* 409 (object with such parameters already exists)
"""
data = self.checked_data()
role_name = data['name']
release = self.get_object_or_404(objects.Release, release_id)
if role_name in release.roles_metadata:
raise self.http(
409,
'Role with name {name} already '
'exists for release {release}'.format(
name=role_name, release=release_id))
objects.Release.update_role(release, data)
raise self.http(
201, RoleSerializer.serialize_from_release(release, role_name))
@content
def GET(self, release_id):
""":http:
* 200 (OK)
"""
release = self.get_object_or_404(objects.Release, release_id)
role_names = six.iterkeys(release.roles_metadata)
return [RoleSerializer.serialize_from_release(release, name)
for name in role_names]
class ClusterRolesHandler(base.BaseHandler):
def _check_role(self, cluster, role_name):
available_roles = six.iterkeys(objects.Cluster.get_roles(cluster))
if role_name not in available_roles:
raise self.http(404, 'Role is not found for the cluster')
@content
def GET(self, cluster_id, role_name):
""":returns: JSON-ed metadata for the role
:http:
* 200 (OK)
* 404 (no such object found)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
self._check_role(cluster, role_name)
return RoleSerializer.serialize_from_cluster(cluster, role_name)
class ClusterRolesCollectionHandler(base.BaseHandler):
@content
def GET(self, cluster_id):
""":returns: collection of JSON-ed cluster roles metadata
:http:
* 200 (OK)
* 404 (no such object found)
"""
cluster = self.get_object_or_404(objects.Cluster, cluster_id)
roles_names = six.iterkeys(objects.Cluster.get_roles(cluster))
return [RoleSerializer.serialize_from_cluster(cluster, name)
for name in roles_names]
| apache-2.0 | 3,605,046,063,001,533,400 | 32.904762 | 78 | 0.624599 | false |
youprofit/kivy | kivy/interactive.py | 25 | 10122 | '''
Interactive launcher
====================
.. versionadded:: 1.3.0
The :class:`InteractiveLauncher` provides a user-friendly python shell
interface to an :class:`App` so that it can be prototyped and debugged
interactively.
.. note::
The Kivy API intends for some functions to only be run once or before the
main EventLoop has started. Methods that can normally be called during the
course of an application will work as intended, but specifically overriding
methods such as :meth:`on_touch` dynamically leads to trouble.
Creating an InteractiveLauncher
-------------------------------
Take your existing subclass of :class:`App` (this can be production code) and
pass an instance to the :class:`InteractiveLauncher` constructor.::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.button import Button
class MyApp(App):
def build(self):
            return Button(text='Hello Shell')
launcher = InteractiveLauncher(MyApp())
launcher.run()
After pressing *enter*, the script will return. This allows the interpreter to
continue running. Inspection or modification of the :class:`App` can be done
safely through the InteractiveLauncher instance or the provided
:class:`SafeMembrane` class instances.
.. note::
    If you want to test this example, start Python without passing it a file so
    that you already have an interactive interpreter, then copy/paste all the
    lines. You'll still have the interpreter at the end, plus the Kivy
    application running.
Interactive Development
-----------------------
IPython provides a fast way to learn the Kivy API. The :class:`App` instance
and all of its attributes, including methods and the entire widget tree,
can be quickly listed by using the '.' operator and pressing 'tab'. Try this
code in an IPython shell.::
from kivy.interactive import InteractiveLauncher
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse
class MyPaintWidget(Widget):
def on_touch_down(self, touch):
with self.canvas:
Color(1, 1, 0)
d = 30.
Ellipse(pos=(touch.x - d/2, touch.y - d/2), size=(d, d))
class TestApp(App):
def build(self):
return Widget()
i = InteractiveLauncher(TestApp())
i.run()
i. # press 'tab' to list attributes of the app
i.root. # press 'tab' to list attributes of the root widget
# App is boring. Attach a new widget!
i.root.add_widget(MyPaintWidget())
i.safeIn()
# The application is now blocked.
# Click on the screen several times.
i.safeOut()
# The clicks will show up now
# Erase artwork and start over
i.root.canvas.clear()
.. note::
All of the proxies used in the module store their referent in the
:attr:`_ref` attribute, which can be accessed directly if needed, such as
for getting doc strings. :func:`help` and :func:`type` will access the
proxy, not its referent.
Directly Pausing the Application
--------------------------------
Both the :class:`InteractiveLauncher` and :class:`SafeMembrane` hold internal
references to the :class:`EventLoop`'s 'safe' and 'confirmed'
:class:`threading.Event` objects. You can use their safing methods to control
the application manually.
:meth:`SafeMembrane.safeIn` will cause the application to pause and
:meth:`SafeMembrane.safeOut` will allow a paused application
to continue running. This is potentially useful for scripting actions into
functions that need the screen to update etc.
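For example, assuming ``i`` is a running :class:`InteractiveLauncher` (this
snippet only illustrates the safing calls and is not part of the module's API)::
    def add_widget_safely(launcher, widget):
        launcher.safeIn()    # pause the app before touching the widget tree
        launcher.root.add_widget(widget)
        launcher.safeOut()   # let the app resume and redraw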
.. note::
The pausing is implemented via the
    :class:`Clock's <kivy.clock.Clock>`
:meth:`~kivy.clock.ClockBase.schedule_once` method
and occurs before the start of each frame.
Adding Attributes Dynamically
-----------------------------
.. note::
This module uses threading and object proxies to encapsulate the running
:class:`App`. Deadlocks and memory corruption can occur if making direct
references inside the thread without going through the provided proxy(s).
The :class:`InteractiveLauncher` can have attributes added to it exactly like a
normal object and if these were created from outside the membrane, they will
not be threadsafe because the external references to them in the python
interpreter do not go through InteractiveLauncher's membrane behavior,
inherited from :class:`SafeMembrane`.
To threadsafe these external references, simply assign them to
:class:`SafeMembrane` instances of themselves like so::
from kivy.interactive import SafeMembrane
interactiveLauncher.attribute = myNewObject
# myNewObject is unsafe
myNewObject = SafeMembrane(myNewObject)
# myNewObject is now safe. Call at will.
myNewObject.method()
TODO
====
Unit tests, examples, and a better explanation of which methods are safe in a
running application would be nice. All three would be excellent.
Could be re-written with a context-manager style i.e.::
with safe:
foo()
Any use cases besides compacting code?
'''
__all__ = ('SafeMembrane', 'InteractiveLauncher')
import inspect
from kivy.app import App
from kivy.base import EventLoop
from kivy.clock import Clock
from threading import Thread, Event
def safeWait(dt):
EventLoop.confirmed.set()
EventLoop.safe.wait()
EventLoop.confirmed.clear()
def unwrap(ob):
while type(ob) == SafeMembrane:
ob = ob._ref
return ob
class SafeMembrane(object):
'''
This help is for a proxy object. Did you want help on the proxy's referent
instead? Try using help(<instance>._ref)
The SafeMembrane is a threadsafe proxy that also returns attributes as new
thread-safe objects
and makes thread-safe method calls, preventing thread-unsafe objects
from leaking into the user's environment.
'''
__slots__ = ('_ref', 'safe', 'confirmed')
def __init__(self, ob, *args, **kwargs):
self.confirmed = EventLoop.confirmed
self.safe = EventLoop.safe
self._ref = ob
def safeIn(self):
"""Provides a thread-safe entry point for interactive launching."""
self.safe.clear()
Clock.schedule_once(safeWait, -1)
self.confirmed.wait()
def safeOut(self):
"""Provides a thread-safe exit point for interactive launching."""
self.safe.set()
def isMethod(self, fn):
return inspect.ismethod(fn)
    # Everything from this point on is just a series of thread-safing proxy
    # methods that forward calls to _ref, pausing the app whenever data will be
    # written or a method will be called. SafeMembrane instances should be
    # unwrapped before being passed into the app thread.
    # Use type() to determine whether an object is a SafeMembrane while debugging.
def __repr__(self):
return self._ref.__repr__()
def __call__(self, *args, **kw):
self.safeIn()
args = list(map(unwrap, args))
for k in list(kw.keys()):
kw[k] = unwrap(kw[k])
r = self._ref(*args, **kw)
self.safeOut()
if r is not None:
return SafeMembrane(r)
def __getattribute__(self, attr, oga=object.__getattribute__):
if attr.startswith('__') or attr == '_ref':
subject = oga(self, '_ref')
if attr == '_ref':
return subject
return getattr(subject, attr)
return oga(self, attr)
def __getattr__(self, attr, oga=object.__getattribute__):
r = getattr(oga(self, '_ref'), attr)
return SafeMembrane(r)
def __setattr__(self, attr, val, osa=object.__setattr__):
if (attr == '_ref'
or hasattr(type(self), attr) and not attr.startswith('__')):
osa(self, attr, val)
else:
self.safeIn()
val = unwrap(val)
setattr(self._ref, attr, val)
self.safeOut()
def __delattr__(self, attr, oda=object.__delattr__):
self.safeIn()
delattr(self._ref, attr)
self.safeOut()
def __bool__(self):
return bool(self._ref)
def __getitem__(self, arg):
return SafeMembrane(self._ref[arg])
def __setitem__(self, arg, val):
self.safeIn()
val = unwrap(val)
self._ref[arg] = val
self.safeOut()
def __delitem__(self, arg):
self.safeIn()
del self._ref[arg]
self.safeOut()
def __getslice__(self, i, j):
return SafeMembrane(self._ref[i:j])
def __setslice__(self, i, j, val):
self.safeIn()
val = unwrap(val)
self._ref[i:j] = val
self.safeOut()
def __delslice__(self, i, j):
self.safeIn()
del self._ref[i:j]
self.safeOut()
def __enter__(self, *args, **kwargs):
self.safeIn()
self._ref.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
self._ref.__exit__(*args, **kwargs)
self.safeOut()
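# A minimal sketch (illustration only, not part of the public API) of the
# context-manager style mentioned in the module TODO: pause the app on entry
# and resume it on exit. ``membrane`` is assumed to be a SafeMembrane or
# InteractiveLauncher instance.
from contextlib import contextmanager
@contextmanager
def _paused(membrane):
    # Equivalent to calling safeIn()/safeOut() around the block body.
    membrane.safeIn()
    try:
        yield membrane
    finally:
        membrane.safeOut()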
class InteractiveLauncher(SafeMembrane):
'''
Proxy to an application instance that launches it in a thread and
then returns and acts as a proxy to the application in the thread.
'''
__slots__ = ('_ref', 'safe', 'confirmed', 'thread', 'app')
def __init__(self, app=None, *args, **kwargs):
if app is None:
app = App()
EventLoop.safe = Event()
self.safe = EventLoop.safe
self.safe.set()
EventLoop.confirmed = Event()
self.confirmed = EventLoop.confirmed
self.app = app
def startApp(app=app, *args, **kwargs):
app.run(*args, **kwargs)
self.thread = Thread(target=startApp, *args, **kwargs)
def run(self):
self.thread.start()
#Proxy behavior starts after this is set. Before this point, attaching
#widgets etc can only be done through the Launcher's app attribute
self._ref = self.app
def stop(self):
EventLoop.quit = True
self.thread.join()
#Act like the app instance even before _ref is set
def __repr__(self):
return self.app.__repr__()
| mit | -4,692,271,078,984,334,000 | 29.672727 | 79 | 0.644141 | false |
awemulya/fieldsight-kobocat | onadata/apps/fsforms/forms.py | 1 | 13888 | from django.db.models import Q
from django.forms.extras.widgets import SelectDateWidget
from django import forms
from django.utils.translation import ugettext_lazy as _
from onadata.apps.fieldsight.models import Site
from onadata.apps.fieldsight.utils.forms import HTML5BootstrapModelForm, KOModelForm, HRBSFormField
from onadata.apps.logger.models import XForm
from .models import FieldSightXF, Stage, Schedule, FormGroup, FORM_STATUS, EducationMaterial
SHARED_LEVEL = [('', 'None'), (0, 'Global'), (1, 'Organization'), (2, 'Project')]
class AssignSettingsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.project_id = kwargs.pop('project', None)
try:
self.form_site = kwargs.get('instance').site.id
except:
self.form_site = 0
super(AssignSettingsForm, self).__init__(*args, **kwargs)
if self.project_id is not None:
sites = Site.objects.filter(project__id=self.project_id).exclude(pk=self.form_site)
else:
sites = Site.objects.all()
self.fields['site'].choices = [(obj.id, obj.name) for obj in sites]
self.fields['site'].empty_label = None
class Meta:
fields = ['site']
model = FieldSightXF
class FormTypeForm(forms.ModelForm):
CHOICES = [(3, 'Normal Form'),
(2, 'Schedule Form'),
(1, 'Stage Form')]
form_type = forms.ChoiceField(error_messages={'required': 'Please Choose Form Type !'},
choices=CHOICES, widget=forms.RadioSelect())
class Meta:
fields = ['form_type']
model = FieldSightXF
class FormStageDetailsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(FormStageDetailsForm, self).__init__(*args, **kwargs)
obj_list = Stage.objects.filter(stage__isnull=False, fieldsightxf__isnull=True)
self.fields['stage'].choices = [(obj.id, obj.name) for obj in obj_list if not obj.form_exists()]
self.fields['stage'].empty_label = None
class Meta:
fields = ['stage']
model = FieldSightXF
class FormScheduleDetailsForm(forms.ModelForm):
class Meta:
fields = ['schedule']
model = FieldSightXF
class FSFormForm(forms.ModelForm):
class Meta:
exclude = ['site']
model = FieldSightXF
class GeneralFSForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(GeneralFSForm, self).__init__(*args, **kwargs)
if hasattr(self.request, "project") and self.request.project is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__project=self.request.project) |
Q(fieldsightformlibrary__organization=self.request.organization))
elif hasattr(self.request, "organization") and self.request.organization is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) |
Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__organization=self.request.organization))
else:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True))
self.fields['xf'].choices = [(obj.id, obj.title) for obj in xform]
self.fields['xf'].empty_label = None
self.fields['xf'].label = "Form"
class Meta:
fields = ['xf']
model = FieldSightXF
class GeneralForm(HTML5BootstrapModelForm, KOModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(GeneralForm, self).__init__(*args, **kwargs)
# if hasattr(self.request, "project") and self.request.project is not None:
# xform = XForm.objects.filter(
# Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True) |
# Q(fieldsightformlibrary__project=self.request.project) |
# Q(fieldsightformlibrary__organization=self.request.organization))
if hasattr(self.request, "organization") and self.request.organization is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) |
Q(user__user_profile__organization=self.request.organization), deleted_xform=None)
else:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True), deleted_xform=None)
self.fields['xf'].choices = [(obj.id, obj.title) for obj in xform]
self.fields['xf'].empty_label = None
self.fields['xf'].label = "Form"
self.fields['default_submission_status'].choices = [(0, 'Pending'), (3, 'Approved'), ]
class Meta:
fields = ['xf', 'default_submission_status']
model = FieldSightXF
class StageForm(forms.ModelForm):
class Meta:
exclude = ['group', 'stage', 'site', 'shared_level', 'project', 'ready']
model = Stage
class MainStageEditForm(forms.ModelForm):
class Meta:
exclude = ['group', 'stage', 'site', 'shared_level', 'project', 'ready', 'order']
model = Stage
class SubStageEditForm(forms.ModelForm):
form = forms.ChoiceField(widget = forms.Select(), required=False,)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(SubStageEditForm, self).__init__(*args, **kwargs)
if hasattr(self.request, "project") and self.request.project is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__project=self.request.project) |
Q(fieldsightformlibrary__organization=self.request.organization), deleted_xform=None)
elif hasattr(self.request, "organization") and self.request.organization is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) |
Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__organization=self.request.organization), deleted_xform=None)
else:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True), deleted_xform=None)
self.fields['form'].choices = [(obj.id, obj.title) for obj in xform]
self.fields['form'].empty_label = None
class Meta:
exclude = ['group', 'stage', 'site', 'shared_level', 'project', 'ready', 'order']
model = Stage
class AddSubSTageForm(forms.ModelForm):
form = forms.ChoiceField(widget = forms.Select(), required=False,)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(AddSubSTageForm, self).__init__(*args, **kwargs)
if hasattr(self.request, "project") and self.request.project is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__project=self.request.project) |
Q(fieldsightformlibrary__organization=self.request.organization), deleted_xform=None)
elif hasattr(self.request, "organization") and self.request.organization is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) |
Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__organization=self.request.organization), deleted_xform=None)
else:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True), deleted_xform=None)
self.fields['form'].choices = [(obj.id, obj.title) for obj in xform]
self.fields['form'].empty_label = None
class Meta:
exclude = ['stage', 'group', 'shared_level', 'site', 'project', 'ready']
model = Stage
class AssignFormToStageForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AssignFormToStageForm, self).__init__(*args, **kwargs)
xf_list = XForm.objects.all()
self.fields['xf'].choices = [(f.id,f.title) for f in xf_list]
self.fields['xf'].empty_label = None
class Meta:
fields = ['xf','site','is_staged','is_scheduled','stage']
model = FieldSightXF
labels = {
"xf": _("Select Form"),
}
widgets = {'site': forms.HiddenInput(),
'is_staged': forms.HiddenInput(),
'is_scheduled': forms.HiddenInput(),
'stage': forms.HiddenInput()}
class AssignFormToScheduleForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AssignFormToScheduleForm, self).__init__(*args, **kwargs)
xf_list = XForm.objects.all()
self.fields['xf'].choices = [(xf.id, xf.title) for xf in xf_list]
self.fields['xf'].empty_label = None
class Meta:
fields = ['xf','site','is_staged','is_scheduled', 'schedule']
model = FieldSightXF
labels = {
"xf": _("Select Form"),
}
widgets = {'site': forms.HiddenInput(),
'is_staged': forms.HiddenInput(),
'is_scheduled': forms.HiddenInput(),
'schedule': forms.HiddenInput()}
BIRTH_YEAR_CHOICES = ('1980', '1981', '1982')
class ScheduleForm(forms.ModelForm):
form = forms.ChoiceField(widget = forms.Select(), required=False,)
    form_type = forms.ChoiceField(label="Select Form Type", widget=forms.Select(
        attrs={'id': 'form_type', 'onchange': 'Hide()'}), required=False,)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(ScheduleForm, self).__init__(*args, **kwargs)
if hasattr(self.request, "project") and self.request.project is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__project=self.request.project) |
Q(fieldsightformlibrary__organization=self.request.organization), deleted_xform=None)
elif hasattr(self.request, "organization") and self.request.organization is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) |
Q(fieldsightformlibrary__is_global=True) |
Q(fieldsightformlibrary__organization=self.request.organization), deleted_xform=None)
else:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True), deleted_xform=None)
self.fields['form'].choices = [(obj.id, obj.title) for obj in xform]
self.fields['form_type'].choices = [(0, "General"),(1, "Scheduled")]
self.fields['form'].empty_label = None
class Meta:
fields = ['form', 'form_type', 'name', 'date_range_start', 'date_range_end', 'selected_days', 'shared_level', 'schedule_level_id']
model = Schedule
widgets = { 'selected_days': forms.CheckboxSelectMultiple,
'date_range_start': SelectDateWidget,
'date_range_end': SelectDateWidget,
}
class KoScheduleForm(HTML5BootstrapModelForm, KOModelForm):
form = forms.ChoiceField(widget = forms.Select(), required=False,)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(KoScheduleForm, self).__init__(*args, **kwargs)
# if hasattr(self.request, "project") and self.request.project is not None:
# xform = XForm.objects.filter(
# Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True) |
# Q(fieldsightformlibrary__project=self.request.project) |
# Q(fieldsightformlibrary__organization=self.request.organization))
if hasattr(self.request, "organization") and self.request.organization is not None:
xform = XForm.objects.filter(
Q(user=self.request.user) |
Q(user__user_profile__organization=self.request.organization), deleted_xform=None)
else:
xform = XForm.objects.filter(
Q(user=self.request.user) | Q(fieldsightformlibrary__is_global=True), deleted_xform=None)
self.fields['form'].choices = [(obj.id, obj.title) for obj in xform]
self.fields['form'].empty_label = None
self.fields['form'].label = "Select Form"
class Meta:
fields = ['form', 'name']
model = Schedule
class GroupForm(forms.ModelForm):
shared_level = forms.ChoiceField(widget = forms.Select(), choices=(SHARED_LEVEL))
class Meta:
fields = ['name', 'description','shared_level']
model = FormGroup
class GroupEditForm(forms.ModelForm):
class Meta:
fields = ['name', 'description', 'id']
model = FormGroup
widgets = {'id': forms.HiddenInput(),}
def clean(self):
if FormGroup.objects.filter(name=self.cleaned_data['name']).exists():
if not FormGroup.objects.get(name=self.cleaned_data['name']).pk == self.instance.pk:
raise forms.ValidationError(_("Name Already Exists"))
class AlterAnswerStatus(forms.Form):
status = forms.ChoiceField(widget = forms.Select(),
choices = (FORM_STATUS), required = True,)
comment = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows':4, 'cols':15}))
class EducationalmaterialForm(HTML5BootstrapModelForm, KOModelForm):
class Meta:
model = EducationMaterial
exclude = ()
| bsd-2-clause | -5,197,019,126,817,885,000 | 39.847059 | 138 | 0.616791 | false |
tempbottle/eventlet | tests/greenio_test.py | 3 | 31504 | import array
import errno
import eventlet
import fcntl
import gc
from io import DEFAULT_BUFFER_SIZE
import os
import shutil
import socket as _orig_sock
import sys
import tempfile
from nose.tools import eq_
from eventlet import event, greenio, debug
from eventlet.hubs import get_hub
from eventlet.green import select, socket, time, ssl
from eventlet.support import capture_stderr, get_errno, six
import tests
if six.PY3:
buffer = memoryview
def bufsized(sock, size=1):
""" Resize both send and receive buffers on a socket.
Useful for testing trampoline. Returns the socket.
>>> import socket
>>> sock = bufsized(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
"""
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, size)
return sock
def min_buf_size():
"""Return the minimum buffer size that the platform supports."""
test_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
return test_sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
def using_epoll_hub(_f):
try:
return 'epolls' in type(get_hub()).__module__
except Exception:
return False
def using_kqueue_hub(_f):
try:
return 'kqueue' in type(get_hub()).__module__
except Exception:
return False
class TestGreenSocket(tests.LimitedTestCase):
def assertWriteToClosedFileRaises(self, fd):
if sys.version_info[0] < 3:
# 2.x socket._fileobjects are odd: writes don't check
# whether the socket is closed or not, and you get an
# AttributeError during flush if it is closed
fd.write(b'a')
self.assertRaises(Exception, fd.flush)
else:
            # 3.x io write to closed file-like object raises ValueError
self.assertRaises(ValueError, fd.write, b'a')
def test_connect_timeout(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
gs = greenio.GreenSocket(s)
try:
gs.connect(('192.0.2.1', 80))
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
except socket.error as e:
# unreachable is also a valid outcome
if not get_errno(e) in (errno.EHOSTUNREACH, errno.ENETUNREACH):
raise
def test_accept_timeout(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
s.listen(50)
s.settimeout(0.1)
gs = greenio.GreenSocket(s)
try:
gs.accept()
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
def test_connect_ex_timeout(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.1)
gs = greenio.GreenSocket(s)
e = gs.connect_ex(('192.0.2.1', 80))
if e not in (errno.EHOSTUNREACH, errno.ENETUNREACH):
self.assertEqual(e, errno.EAGAIN)
def test_recv_timeout(self):
listener = greenio.GreenSocket(socket.socket())
listener.bind(('', 0))
listener.listen(50)
evt = event.Event()
def server():
# accept the connection in another greenlet
sock, addr = listener.accept()
evt.wait()
gt = eventlet.spawn(server)
addr = listener.getsockname()
client = greenio.GreenSocket(socket.socket())
client.settimeout(0.1)
client.connect(addr)
try:
client.recv(8192)
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
evt.send()
gt.wait()
def test_recvfrom_timeout(self):
gs = greenio.GreenSocket(
socket.socket(socket.AF_INET, socket.SOCK_DGRAM))
gs.settimeout(.1)
gs.bind(('', 0))
try:
gs.recvfrom(8192)
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
def test_recvfrom_into_timeout(self):
buf = array.array('B')
gs = greenio.GreenSocket(
socket.socket(socket.AF_INET, socket.SOCK_DGRAM))
gs.settimeout(.1)
gs.bind(('', 0))
try:
gs.recvfrom_into(buf)
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
def test_recv_into_timeout(self):
buf = array.array('B')
listener = greenio.GreenSocket(socket.socket())
listener.bind(('', 0))
listener.listen(50)
evt = event.Event()
def server():
# accept the connection in another greenlet
sock, addr = listener.accept()
evt.wait()
gt = eventlet.spawn(server)
addr = listener.getsockname()
client = greenio.GreenSocket(socket.socket())
client.settimeout(0.1)
client.connect(addr)
try:
client.recv_into(buf)
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
evt.send()
gt.wait()
def test_send_timeout(self):
self.reset_timeout(2)
listener = bufsized(eventlet.listen(('', 0)))
evt = event.Event()
def server():
# accept the connection in another greenlet
sock, addr = listener.accept()
sock = bufsized(sock)
evt.wait()
gt = eventlet.spawn(server)
addr = listener.getsockname()
client = bufsized(greenio.GreenSocket(socket.socket()))
client.connect(addr)
try:
client.settimeout(0.00001)
msg = b"A" * 100000 # large enough number to overwhelm most buffers
total_sent = 0
# want to exceed the size of the OS buffer so it'll block in a
# single send
for x in range(10):
total_sent += client.send(msg)
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
evt.send()
gt.wait()
def test_sendall_timeout(self):
listener = greenio.GreenSocket(socket.socket())
listener.bind(('', 0))
listener.listen(50)
evt = event.Event()
def server():
# accept the connection in another greenlet
sock, addr = listener.accept()
evt.wait()
gt = eventlet.spawn(server)
addr = listener.getsockname()
client = greenio.GreenSocket(socket.socket())
client.settimeout(0.1)
client.connect(addr)
try:
msg = b"A" * (8 << 20)
# want to exceed the size of the OS buffer so it'll block
client.sendall(msg)
self.fail("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
self.assertEqual(e.args[0], 'timed out')
evt.send()
gt.wait()
def test_close_with_makefile(self):
def accept_close_early(listener):
# verify that the makefile and the socket are truly independent
# by closing the socket prior to using the made file
try:
conn, addr = listener.accept()
fd = conn.makefile('wb')
conn.close()
fd.write(b'hello\n')
fd.close()
self.assertWriteToClosedFileRaises(fd)
self.assertRaises(socket.error, conn.send, b'b')
finally:
listener.close()
def accept_close_late(listener):
# verify that the makefile and the socket are truly independent
# by closing the made file and then sending a character
try:
conn, addr = listener.accept()
fd = conn.makefile('wb')
fd.write(b'hello')
fd.close()
conn.send(b'\n')
conn.close()
self.assertWriteToClosedFileRaises(fd)
self.assertRaises(socket.error, conn.send, b'b')
finally:
listener.close()
def did_it_work(server):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', server.getsockname()[1]))
fd = client.makefile('rb')
client.close()
assert fd.readline() == b'hello\n'
assert fd.read() == b''
fd.close()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', 0))
server.listen(50)
killer = eventlet.spawn(accept_close_early, server)
did_it_work(server)
killer.wait()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('0.0.0.0', 0))
server.listen(50)
killer = eventlet.spawn(accept_close_late, server)
did_it_work(server)
killer.wait()
def test_del_closes_socket(self):
def accept_once(listener):
# delete/overwrite the original conn
# object, only keeping the file object around
# closing the file object should close everything
try:
conn, addr = listener.accept()
conn = conn.makefile('wb')
conn.write(b'hello\n')
conn.close()
gc.collect()
self.assertWriteToClosedFileRaises(conn)
finally:
listener.close()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('127.0.0.1', 0))
server.listen(50)
killer = eventlet.spawn(accept_once, server)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', server.getsockname()[1]))
fd = client.makefile('rb')
client.close()
assert fd.read() == b'hello\n'
assert fd.read() == b''
killer.wait()
def test_full_duplex(self):
large_data = b'*' * 10 * min_buf_size()
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('127.0.0.1', 0))
listener.listen(50)
bufsized(listener)
def send_large(sock):
sock.sendall(large_data)
def read_large(sock):
result = sock.recv(len(large_data))
while len(result) < len(large_data):
result += sock.recv(len(large_data))
self.assertEqual(result, large_data)
def server():
(sock, addr) = listener.accept()
sock = bufsized(sock)
send_large_coro = eventlet.spawn(send_large, sock)
eventlet.sleep(0)
result = sock.recv(10)
expected = b'hello world'
while len(result) < len(expected):
result += sock.recv(10)
self.assertEqual(result, expected)
send_large_coro.wait()
server_evt = eventlet.spawn(server)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', listener.getsockname()[1]))
bufsized(client)
large_evt = eventlet.spawn(read_large, client)
eventlet.sleep(0)
client.sendall(b'hello world')
server_evt.wait()
large_evt.wait()
client.close()
def test_sendall(self):
# test adapted from Marcus Cavanaugh's email
# it may legitimately take a while, but will eventually complete
self.timer.cancel()
second_bytes = 10
def test_sendall_impl(many_bytes):
bufsize = max(many_bytes // 15, 2)
def sender(listener):
(sock, addr) = listener.accept()
sock = bufsized(sock, size=bufsize)
sock.sendall(b'x' * many_bytes)
sock.sendall(b'y' * second_bytes)
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(("", 0))
listener.listen(50)
sender_coro = eventlet.spawn(sender, listener)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', listener.getsockname()[1]))
bufsized(client, size=bufsize)
total = 0
while total < many_bytes:
data = client.recv(min(many_bytes - total, many_bytes // 10))
if not data:
break
total += len(data)
total2 = 0
            while total2 < second_bytes:
data = client.recv(second_bytes)
if not data:
break
total2 += len(data)
sender_coro.wait()
client.close()
for how_many in (1000, 10000, 100000, 1000000):
test_sendall_impl(how_many)
def test_wrap_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 0))
sock.listen(50)
ssl.wrap_socket(sock)
def test_timeout_and_final_write(self):
# This test verifies that a write on a socket that we've
# stopped listening for doesn't result in an incorrect switch
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('127.0.0.1', 0))
server.listen(50)
bound_port = server.getsockname()[1]
def sender(evt):
s2, addr = server.accept()
wrap_wfile = s2.makefile('wb')
eventlet.sleep(0.02)
wrap_wfile.write(b'hi')
s2.close()
evt.send(b'sent via event')
evt = event.Event()
eventlet.spawn(sender, evt)
# lets the socket enter accept mode, which
# is necessary for connect to succeed on windows
eventlet.sleep(0)
try:
# try and get some data off of this pipe
# but bail before any is sent
eventlet.Timeout(0.01)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', bound_port))
wrap_rfile = client.makefile()
wrap_rfile.read(1)
self.fail()
except eventlet.TimeoutError:
pass
result = evt.wait()
self.assertEqual(result, b'sent via event')
server.close()
client.close()
@tests.skip_with_pyevent
def test_raised_multiple_readers(self):
debug.hub_prevent_multiple_readers(True)
def handle(sock, addr):
sock.recv(1)
sock.sendall(b"a")
raise eventlet.StopServe()
listener = eventlet.listen(('127.0.0.1', 0))
eventlet.spawn(eventlet.serve, listener, handle)
def reader(s):
s.recv(1)
s = eventlet.connect(('127.0.0.1', listener.getsockname()[1]))
a = eventlet.spawn(reader, s)
eventlet.sleep(0)
self.assertRaises(RuntimeError, s.recv, 1)
s.sendall(b'b')
a.wait()
@tests.skip_with_pyevent
@tests.skip_if(using_epoll_hub)
@tests.skip_if(using_kqueue_hub)
def test_closure(self):
def spam_to_me(address):
sock = eventlet.connect(address)
while True:
try:
sock.sendall(b'hello world')
except socket.error as e:
if get_errno(e) == errno.EPIPE:
return
raise
server = eventlet.listen(('127.0.0.1', 0))
sender = eventlet.spawn(spam_to_me, server.getsockname())
client, address = server.accept()
server.close()
def reader():
try:
while True:
data = client.recv(1024)
assert data
except socket.error as e:
# we get an EBADF because client is closed in the same process
# (but a different greenthread)
if get_errno(e) != errno.EBADF:
raise
def closer():
client.close()
reader = eventlet.spawn(reader)
eventlet.spawn_n(closer)
reader.wait()
sender.wait()
def test_invalid_connection(self):
# find an unused port by creating a socket then closing it
listening_socket = eventlet.listen(('127.0.0.1', 0))
port = listening_socket.getsockname()[1]
listening_socket.close()
self.assertRaises(socket.error, eventlet.connect, ('127.0.0.1', port))
def test_zero_timeout_and_back(self):
listen = eventlet.listen(('', 0))
# Keep reference to server side of socket
server = eventlet.spawn(listen.accept)
client = eventlet.connect(listen.getsockname())
client.settimeout(0.05)
# Now must raise socket.timeout
self.assertRaises(socket.timeout, client.recv, 1)
client.settimeout(0)
# Now must raise socket.error with EAGAIN
try:
client.recv(1)
assert False
except socket.error as e:
assert get_errno(e) == errno.EAGAIN
client.settimeout(0.05)
# Now socket.timeout again
self.assertRaises(socket.timeout, client.recv, 1)
server.wait()
def test_default_nonblocking(self):
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
flags = fcntl.fcntl(sock1.fd.fileno(), fcntl.F_GETFL)
assert flags & os.O_NONBLOCK
sock2 = socket.socket(sock1.fd)
flags = fcntl.fcntl(sock2.fd.fileno(), fcntl.F_GETFL)
assert flags & os.O_NONBLOCK
def test_dup_nonblocking(self):
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
flags = fcntl.fcntl(sock1.fd.fileno(), fcntl.F_GETFL)
assert flags & os.O_NONBLOCK
sock2 = sock1.dup()
flags = fcntl.fcntl(sock2.fd.fileno(), fcntl.F_GETFL)
assert flags & os.O_NONBLOCK
def test_skip_nonblocking(self):
sock1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock1.fd.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
flags = fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
assert flags & os.O_NONBLOCK == 0
sock2 = socket.socket(sock1.fd, set_nonblocking=False)
flags = fcntl.fcntl(sock2.fd.fileno(), fcntl.F_GETFL)
assert flags & os.O_NONBLOCK == 0
def test_sockopt_interface(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 0
assert sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) == b'\000'
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def test_socketpair_select(self):
# https://github.com/eventlet/eventlet/pull/25
s1, s2 = socket.socketpair()
assert select.select([], [s1], [], 0) == ([], [s1], [])
assert select.select([], [s1], [], 0) == ([], [s1], [])
def test_shutdown_safe(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.close()
# should not raise
greenio.shutdown_safe(sock)
def test_get_fileno_of_a_socket_works():
class DummySocket(object):
def fileno(self):
return 123
assert select.get_fileno(DummySocket()) == 123
def test_get_fileno_of_an_int_works():
assert select.get_fileno(123) == 123
expected_get_fileno_type_error_message = (
'Expected int or long, got <%s \'str\'>' % ('type' if six.PY2 else 'class'))
def test_get_fileno_of_wrong_type_fails():
try:
select.get_fileno('foo')
except TypeError as ex:
assert str(ex) == expected_get_fileno_type_error_message
else:
assert False, 'Expected TypeError not raised'
def test_get_fileno_of_a_socket_with_fileno_returning_wrong_type_fails():
class DummySocket(object):
def fileno(self):
return 'foo'
try:
select.get_fileno(DummySocket())
except TypeError as ex:
assert str(ex) == expected_get_fileno_type_error_message
else:
assert False, 'Expected TypeError not raised'
class TestGreenPipe(tests.LimitedTestCase):
@tests.skip_on_windows
def setUp(self):
super(self.__class__, self).setUp()
self.tempdir = tempfile.mkdtemp('_green_pipe_test')
def tearDown(self):
shutil.rmtree(self.tempdir)
super(self.__class__, self).tearDown()
def test_pipe(self):
r, w = os.pipe()
rf = greenio.GreenPipe(r, 'rb')
wf = greenio.GreenPipe(w, 'wb', 0)
def sender(f, content):
for ch in map(six.int2byte, six.iterbytes(content)):
eventlet.sleep(0.0001)
f.write(ch)
f.close()
one_line = b"12345\n"
eventlet.spawn(sender, wf, one_line * 5)
for i in range(5):
line = rf.readline()
eventlet.sleep(0.01)
self.assertEqual(line, one_line)
self.assertEqual(rf.readline(), b'')
def test_pipe_read(self):
# ensure that 'readline' works properly on GreenPipes when data is not
# immediately available (fd is nonblocking, was raising EAGAIN)
# also ensures that readline() terminates on '\n' and '\r\n'
r, w = os.pipe()
r = greenio.GreenPipe(r, 'rb')
w = greenio.GreenPipe(w, 'wb')
def writer():
eventlet.sleep(.1)
w.write(b'line\n')
w.flush()
w.write(b'line\r\n')
w.flush()
gt = eventlet.spawn(writer)
eventlet.sleep(0)
line = r.readline()
self.assertEqual(line, b'line\n')
line = r.readline()
self.assertEqual(line, b'line\r\n')
gt.wait()
def test_pip_read_until_end(self):
        # similar to test_pipe_read above, but reading until EOF
r, w = os.pipe()
r = greenio.GreenPipe(r, 'rb')
w = greenio.GreenPipe(w, 'wb')
w.write(b'c' * DEFAULT_BUFFER_SIZE * 2)
w.close()
buf = r.read() # no chunk size specified; read until end
self.assertEqual(len(buf), 2 * DEFAULT_BUFFER_SIZE)
self.assertEqual(buf[:3], b'ccc')
def test_pipe_writes_large_messages(self):
r, w = os.pipe()
r = greenio.GreenPipe(r, 'rb')
w = greenio.GreenPipe(w, 'wb')
large_message = b"".join([1024 * six.int2byte(i) for i in range(65)])
def writer():
w.write(large_message)
w.close()
gt = eventlet.spawn(writer)
for i in range(65):
buf = r.read(1024)
expected = 1024 * six.int2byte(i)
self.assertEqual(
buf, expected,
"expected=%r..%r, found=%r..%r iter=%d"
% (expected[:4], expected[-4:], buf[:4], buf[-4:], i))
gt.wait()
def test_seek_on_buffered_pipe(self):
f = greenio.GreenPipe(self.tempdir + "/TestFile", 'wb+', 1024)
self.assertEqual(f.tell(), 0)
f.seek(0, 2)
self.assertEqual(f.tell(), 0)
f.write(b'1234567890')
f.seek(0, 2)
self.assertEqual(f.tell(), 10)
f.seek(0)
value = f.read(1)
self.assertEqual(value, b'1')
self.assertEqual(f.tell(), 1)
value = f.read(1)
self.assertEqual(value, b'2')
self.assertEqual(f.tell(), 2)
f.seek(0, 1)
self.assertEqual(f.readline(), b'34567890')
f.seek(-5, 1)
self.assertEqual(f.readline(), b'67890')
f.seek(0)
self.assertEqual(f.readline(), b'1234567890')
f.seek(0, 2)
self.assertEqual(f.readline(), b'')
def test_truncate(self):
f = greenio.GreenPipe(self.tempdir + "/TestFile", 'wb+', 1024)
f.write(b'1234567890')
f.truncate(9)
self.assertEqual(f.tell(), 9)
class TestGreenIoLong(tests.LimitedTestCase):
TEST_TIMEOUT = 10 # the test here might take a while depending on the OS
@tests.skip_with_pyevent
def test_multiple_readers(self, clibufsize=False):
debug.hub_prevent_multiple_readers(False)
recvsize = 2 * min_buf_size()
sendsize = 10 * recvsize
# test that we can have multiple coroutines reading
# from the same fd. We make no guarantees about which one gets which
# bytes, but they should both get at least some
def reader(sock, results):
while True:
data = sock.recv(recvsize)
if not data:
break
results.append(data)
results1 = []
results2 = []
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('127.0.0.1', 0))
listener.listen(50)
def server():
(sock, addr) = listener.accept()
sock = bufsized(sock)
try:
c1 = eventlet.spawn(reader, sock, results1)
c2 = eventlet.spawn(reader, sock, results2)
try:
c1.wait()
c2.wait()
finally:
c1.kill()
c2.kill()
finally:
sock.close()
server_coro = eventlet.spawn(server)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', listener.getsockname()[1]))
if clibufsize:
bufsized(client, size=sendsize)
else:
bufsized(client)
client.sendall(b'*' * sendsize)
client.close()
server_coro.wait()
listener.close()
assert len(results1) > 0
assert len(results2) > 0
debug.hub_prevent_multiple_readers()
@tests.skipped # by rdw because it fails but it's not clear how to make it pass
@tests.skip_with_pyevent
def test_multiple_readers2(self):
self.test_multiple_readers(clibufsize=True)
class TestGreenIoStarvation(tests.LimitedTestCase):
# fixme: this doesn't succeed, because of eventlet's predetermined
# ordering. two processes, one with server, one with client eventlets
# might be more reliable?
TEST_TIMEOUT = 300 # the test here might take a while depending on the OS
@tests.skipped # by rdw, because it fails but it's not clear how to make it pass
@tests.skip_with_pyevent
def test_server_starvation(self, sendloops=15):
recvsize = 2 * min_buf_size()
sendsize = 10000 * recvsize
results = [[] for i in range(5)]
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('127.0.0.1', 0))
port = listener.getsockname()[1]
listener.listen(50)
base_time = time.time()
def server(my_results):
sock, addr = listener.accept()
datasize = 0
t1 = None
t2 = None
try:
while True:
data = sock.recv(recvsize)
if not t1:
t1 = time.time() - base_time
if not data:
t2 = time.time() - base_time
my_results.append(datasize)
my_results.append((t1, t2))
break
datasize += len(data)
finally:
sock.close()
def client():
pid = os.fork()
if pid:
return pid
client = _orig_sock.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', port))
bufsized(client, size=sendsize)
for i in range(sendloops):
client.sendall(b'*' * sendsize)
client.close()
os._exit(0)
clients = []
servers = []
for r in results:
servers.append(eventlet.spawn(server, r))
for r in results:
clients.append(client())
for s in servers:
s.wait()
for c in clients:
os.waitpid(c, 0)
listener.close()
# now test that all of the server receive intervals overlap, and
# that there were no errors.
for r in results:
assert len(r) == 2, "length is %d not 2!: %s\n%s" % (len(r), r, results)
assert r[0] == sendsize * sendloops
assert len(r[1]) == 2
assert r[1][0] is not None
assert r[1][1] is not None
starttimes = sorted(r[1][0] for r in results)
endtimes = sorted(r[1][1] for r in results)
runlengths = sorted(r[1][1] - r[1][0] for r in results)
# assert that the last task started before the first task ended
# (our no-starvation condition)
assert starttimes[-1] < endtimes[0], \
"Not overlapping: starts %s ends %s" % (starttimes, endtimes)
maxstartdiff = starttimes[-1] - starttimes[0]
assert maxstartdiff * 2 < runlengths[0], \
"Largest difference in starting times more than twice the shortest running time!"
assert runlengths[0] * 2 > runlengths[-1], \
"Longest runtime more than twice as long as shortest!"
def test_set_nonblocking():
sock = _orig_sock.socket(socket.AF_INET, socket.SOCK_DGRAM)
fileno = sock.fileno()
orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
assert orig_flags & os.O_NONBLOCK == 0
greenio.set_nonblocking(sock)
new_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
assert new_flags == (orig_flags | os.O_NONBLOCK)
def test_socket_del_fails_gracefully_when_not_fully_initialized():
# Regression introduced in da87716714689894f23d0db7b003f26d97031e83, reported in:
# * GH #137 https://github.com/eventlet/eventlet/issues/137
# * https://bugs.launchpad.net/oslo.messaging/+bug/1369999
class SocketSubclass(socket.socket):
def __init__(self):
pass
with capture_stderr() as err:
SocketSubclass()
assert err.getvalue() == ''
def test_double_close_219():
tests.run_isolated('greenio_double_close_219.py')
| mit | 7,905,878,560,932,346,000 | 31.444902 | 93 | 0.563897 | false |
EmreAtes/spack | var/spack/repos/builtin/packages/opencoarrays/package.py | 5 | 2506 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Opencoarrays(CMakePackage):
"""OpenCoarrays is an open-source software project that produces an
application binary interface (ABI) supporting coarray Fortran (CAF)
compilers, an application programming interface (API) that supports users
of non-CAF compilers, and an associated compiler wrapper and program
launcher.
"""
homepage = "http://www.opencoarrays.org/"
url = "https://github.com/sourceryinstitute/OpenCoarrays/releases/download/1.8.4/OpenCoarrays-1.8.4.tar.gz"
version('1.8.10', '9ba1670647db4d986634abf743abfd6a')
version('1.8.4', '7c9eaffc3a0b5748d0d840e52ec9d4ad')
version('1.8.0', 'ca78d1507b2a118c75128c6c2e093e27')
version('1.7.4', '85ba87def461e3ff5a164de2e6482930')
version('1.6.2', '5a4da993794f3e04ea7855a6678981ba')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo',
'MinSizeRel', 'CodeCoverage'))
depends_on('mpi')
def cmake_args(self):
args = []
args.append("-DCMAKE_C_COMPILER=%s" % self.spec['mpi'].mpicc)
args.append("-DCMAKE_Fortran_COMPILER=%s" % self.spec['mpi'].mpifc)
return args
| lgpl-2.1 | 1,041,762,001,744,645,600 | 43.75 | 116 | 0.674781 | false |
kswiat/django | django/test/utils.py | 15 | 19704 | from contextlib import contextmanager
import logging
import re
import sys
import time
from unittest import skipUnless
import warnings
from functools import wraps
from xml.dom.minidom import parseString, Node
from django.apps import apps
from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.core.signals import request_started
from django.db import reset_queries
from django.http import request
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.test.signals import template_rendered, setting_changed
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils.encoding import force_str
from django.utils.translation import deactivate
__all__ = (
'Approximate', 'ContextList', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
RESTORE_LOADERS_ATTR = '_original_template_source_loaders'
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
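# Illustrative usage (editor's note, not part of the public API): Approximate
# compares equal to any value that rounds to it within ``places`` decimals, e.g.
#   self.assertEqual(qs.aggregate(avg=Avg('rating')), {'avg': Approximate(3.33, places=2)})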
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
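# Illustrative note (editor's addition): the test client exposes response.context
# as a ContextList, so response.context['form'] searches every subcontext for the
# key, and ``'form' in response.context`` works the same way.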
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
        - Setting the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template._original_render = Template._render
Template._render = instrumented_test_render
# Storing previous values in the settings module itself is problematic.
# Store them in arbitrary (but related) modules instead. See #20636.
mail._original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
request._original_allowed_hosts = settings.ALLOWED_HOSTS
settings.ALLOWED_HOSTS = ['*']
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template._original_render
del Template._original_render
settings.EMAIL_BACKEND = mail._original_email_backend
del mail._original_email_backend
settings.ALLOWED_HOSTS = request._original_allowed_hosts
del request._original_allowed_hosts
del mail.outbox
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
class override_template_loaders(ContextDecorator):
"""
Acts as a function decorator, context manager or start/end manager and
    overrides the template loaders. It could be used in the following ways:
@override_template_loaders(SomeLoader())
def test_function(self):
...
with override_template_loaders(SomeLoader(), OtherLoader()) as loaders:
...
loaders = override_template_loaders.override(SomeLoader())
...
override_template_loaders.restore()
"""
def __init__(self, *loaders):
self.loaders = loaders
self.old_loaders = []
def __enter__(self):
self.old_loaders = loader.template_source_loaders
loader.template_source_loaders = self.loaders
return self.loaders
def __exit__(self, type, value, traceback):
loader.template_source_loaders = self.old_loaders
@classmethod
def override(cls, *loaders):
if hasattr(loader, RESTORE_LOADERS_ATTR):
raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)
setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
loader.template_source_loaders = loaders
return loaders
@classmethod
def restore(cls):
loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
delattr(loader, RESTORE_LOADERS_ATTR)
class TestTemplateLoader(loader.BaseLoader):
"A custom template loader that loads templates from a dictionary."
is_usable = True
def __init__(self, templates_dict):
self.templates_dict = templates_dict
def load_template_source(self, template_name, template_dirs=None,
skip_template=None):
try:
return (self.templates_dict[template_name],
"test:%s" % template_name)
except KeyError:
raise TemplateDoesNotExist(template_name)
class override_with_test_loader(override_template_loaders):
"""
Acts as a function decorator, context manager or start/end manager and
    overrides the template loaders with the test loader. It could be used in the
following ways:
@override_with_test_loader(templates_dict, use_cached_loader=True)
def test_function(self):
...
with override_with_test_loader(templates_dict) as test_loader:
...
test_loader = override_with_test_loader.override(templates_dict)
...
override_with_test_loader.restore()
"""
def __init__(self, templates_dict, use_cached_loader=False):
self.loader = self._get_loader(templates_dict, use_cached_loader)
super(override_with_test_loader, self).__init__(self.loader)
def __enter__(self):
return super(override_with_test_loader, self).__enter__()[0]
@classmethod
def override(cls, templates_dict, use_cached_loader=False):
loader = cls._get_loader(templates_dict, use_cached_loader)
return super(override_with_test_loader, cls).override(loader)[0]
@classmethod
def _get_loader(cls, templates_dict, use_cached_loader=False):
if use_cached_loader:
loader = cached.Loader(('TestTemplateLoader',))
            loader._cached_loaders = TestTemplateLoader(templates_dict)
            # Return the caching wrapper so the dictionary-backed loader is actually used.
            return loader
        return TestTemplateLoader(templates_dict)
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
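    A minimal usage sketch (``USE_TZ`` is just an arbitrary choice of setting)::
        @override_settings(USE_TZ=True)
        def test_something(self):
            ...  # the setting is overridden while the test runs
        with override_settings(USE_TZ=True):
            ...  # and restored automatically when the block exits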
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(test_func)
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
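    For instance (a sketch; the custom middleware path is a placeholder)::
        @modify_settings(MIDDLEWARE_CLASSES={
            'append': 'myapp.middleware.ExtraMiddleware',
            'remove': 'django.middleware.csrf.CsrfViewMiddleware',
        })
        def test_middleware(self):
            ...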
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase._pre_setup.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase._pre_setup, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
def override_system_checks(new_checks, deployment_checks=None):
""" Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks. """
from django.core.checks.registry import registry
def outer(test_func):
@wraps(test_func)
def inner(*args, **kwargs):
old_checks = registry.registered_checks
registry.registered_checks = new_checks
old_deployment_checks = registry.deployment_checks
if deployment_checks is not None:
registry.deployment_checks = deployment_checks
try:
return test_func(*args, **kwargs)
finally:
registry.registered_checks = old_checks
registry.deployment_checks = old_deployment_checks
return inner
return outer
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
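    For example, the following fragments are considered equal even though the
    attribute order differs (illustrative only)::
        compare_xml('<a x="1" y="2"/>', '<a y="2" x="1"/>')  # -> True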
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join([c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE])
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.replace('\\n', '\n')
got = got.replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
# root element. This allow us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
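    Rough usage sketch (``Author`` stands in for any model)::
        from django.db import connection
        with CaptureQueriesContext(connection) as ctx:
            Author.objects.exists()
        assert len(ctx.captured_queries) == 1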
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
class IgnoreDeprecationWarningsMixin(object):
warning_classes = [RemovedInDjango19Warning]
def setUp(self):
super(IgnoreDeprecationWarningsMixin, self).setUp()
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
for warning_class in self.warning_classes:
warnings.filterwarnings("ignore", category=warning_class)
def tearDown(self):
self.catch_warnings.__exit__(*sys.exc_info())
super(IgnoreDeprecationWarningsMixin, self).tearDown()
class IgnorePendingDeprecationWarningsMixin(IgnoreDeprecationWarningsMixin):
warning_classes = [RemovedInDjango20Warning]
class IgnoreAllDeprecationWarningsMixin(IgnoreDeprecationWarningsMixin):
warning_classes = [RemovedInDjango20Warning, RemovedInDjango19Warning]
@contextmanager
def patch_logger(logger_name, log_level):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
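    Usage sketch (the logger name and the triggering call are placeholders)::
        with patch_logger('django.security.csrf', 'warning') as calls:
            do_something_that_warns()
        assert len(calls) == 1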
"""
calls = []
def replacement(msg, *args, **kwargs):
calls.append(msg % args)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
| bsd-3-clause | 5,782,281,711,833,257,000 | 32.797599 | 87 | 0.632765 | false |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/api/logo2.py | 3 | 2716 | """
Thanks to Tony Yu <[email protected]> for the logo design
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
from pylab import rand
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['axes.edgecolor'] = 'gray'
axalpha = 0.05
#figcolor = '#EFEFEF'
figcolor = 'white'
dpi = 80
fig = plt.figure(figsize=(6, 1.1),dpi=dpi)
fig.figurePatch.set_edgecolor(figcolor)
fig.figurePatch.set_facecolor(figcolor)
def add_math_background():
ax = fig.add_axes([0., 0., 1., 1.])
text = []
text.append((r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$", (0.7, 0.2), 20))
text.append((r"$\frac{d\rho}{d t} + \rho \vec{v}\cdot\nabla\vec{v} = -\nabla p + \mu\nabla^2 \vec{v} + \rho \vec{g}$",
(0.35, 0.9), 20))
text.append((r"$\int_{-\infty}^\infty e^{-x^2}dx=\sqrt{\pi}$",
(0.15, 0.3), 25))
#text.append((r"$E = mc^2 = \sqrt{{m_0}^2c^4 + p^2c^2}$",
# (0.7, 0.42), 30))
text.append((r"$F_G = G\frac{m_1m_2}{r^2}$",
(0.85, 0.7), 30))
for eq, (x, y), size in text:
ax.text(x, y, eq, ha='center', va='center', color="#11557c", alpha=0.25,
transform=ax.transAxes, fontsize=size)
ax.set_axis_off()
return ax
def add_matplotlib_text(ax):
ax.text(0.95, 0.5, 'matplotlib', color='#11557c', fontsize=65,
ha='right', va='center', alpha=1.0, transform=ax.transAxes)
def add_polar_bar():
ax = fig.add_axes([0.025, 0.075, 0.2, 0.85], polar=True, resolution=50)
ax.axesPatch.set_alpha(axalpha)
ax.set_axisbelow(True)
N = 7
arc = 2. * np.pi
theta = np.arange(0.0, arc, arc/N)
radii = 10 * np.array([0.2, 0.6, 0.8, 0.7, 0.4, 0.5, 0.8])
width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3])
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(cm.jet(r/10.))
bar.set_alpha(0.6)
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_visible(False)
for line in ax.get_ygridlines() + ax.get_xgridlines():
line.set_lw(0.8)
line.set_alpha(0.9)
line.set_ls('-')
line.set_color('0.5')
ax.set_yticks(np.arange(1, 9, 2))
ax.set_rmax(9)
if __name__ == '__main__':
main_axes = add_math_background()
add_polar_bar()
add_matplotlib_text(main_axes)
plt.show()
| gpl-2.0 | -2,824,182,137,780,392,000 | 31.722892 | 300 | 0.569219 | false |
larrybradley/astropy | astropy/modeling/mappings.py | 3 | 10835 | """
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
# pylint: disable=invalid-name
from .core import FittableModel, Model
from astropy.units import Quantity
__all__ = ['Mapping', 'Identity', 'UnitsMapping']
class Mapping(FittableModel):
"""
Allows inputs to be reordered, duplicated or dropped.
Parameters
----------
mapping : tuple
A tuple of integers representing indices of the inputs to this model
to return and in what order to return them. See
:ref:`astropy:compound-model-mappings` for more details.
n_inputs : int
Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
used (i.e. the highest input index used in the mapping).
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Raises
------
TypeError
        Raised when number of inputs is less than ``max(mapping)``.
Examples
--------
>>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
>>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
>>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
>>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
>>> model(1, 2) # doctest: +FLOAT_CMP
(17.0, 14.2)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, mapping, n_inputs=None, name=None, meta=None):
self._inputs = ()
self._outputs = ()
if n_inputs is None:
self._n_inputs = max(mapping) + 1
else:
self._n_inputs = n_inputs
self._n_outputs = len(mapping)
super().__init__(name=name, meta=meta)
self.inputs = tuple('x' + str(idx) for idx in range(self._n_inputs))
self.outputs = tuple('x' + str(idx) for idx in range(self._n_outputs))
self._mapping = mapping
self._input_units_strict = {key: False for key in self._inputs}
self._input_units_allow_dimensionless = {key: False for key in self._inputs}
@property
def n_inputs(self):
return self._n_inputs
@property
def n_outputs(self):
return self._n_outputs
@property
def mapping(self):
"""Integers representing indices of the inputs."""
return self._mapping
def __repr__(self):
if self.name is None:
return f'<Mapping({self.mapping})>'
return f'<Mapping({self.mapping}, name={self.name!r})>'
def evaluate(self, *args):
if len(args) != self.n_inputs:
name = self.name if self.name is not None else "Mapping"
raise TypeError(f'{name} expects {self.n_inputs} inputs; got {len(args)}')
result = tuple(args[idx] for idx in self._mapping)
if self.n_outputs == 1:
return result[0]
return result
@property
def inverse(self):
"""
A `Mapping` representing the inverse of the current mapping.
Raises
------
`NotImplementedError`
            An inverse does not exist on mappings that drop some of their inputs
(there is then no way to reconstruct the inputs that were dropped).
"""
try:
mapping = tuple(self.mapping.index(idx)
for idx in range(self.n_inputs))
except ValueError:
raise NotImplementedError(
"Mappings such as {} that drop one or more of their inputs "
"are not invertible at this time.".format(self.mapping))
inv = self.__class__(mapping)
inv._inputs = self._outputs
inv._outputs = self._inputs
inv._n_inputs = len(inv._inputs)
inv._n_outputs = len(inv._outputs)
return inv
class Identity(Mapping):
"""
Returns inputs unchanged.
This class is useful in compound models when some of the inputs must be
passed unchanged to the next model.
Parameters
----------
n_inputs : int
Specifies the number of inputs this identity model accepts.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Examples
--------
Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::
>>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
... Identity)
>>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
>>> model(1,1) # doctest: +FLOAT_CMP
(2.4, 2.0)
>>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
(1.0, 1.0)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, n_inputs, name=None, meta=None):
mapping = tuple(range(n_inputs))
super().__init__(mapping, name=name, meta=meta)
def __repr__(self):
if self.name is None:
return f'<Identity({self.n_inputs})>'
return f'<Identity({self.n_inputs}, name={self.name!r})>'
@property
def inverse(self):
"""
The inverse transformation.
In this case of `Identity`, ``self.inverse is self``.
"""
return self
class UnitsMapping(Model):
"""
Mapper that operates on the units of the input, first converting to
canonical units, then assigning new units without further conversion.
Used by Model.coerce_units to support units on otherwise unitless models
such as Polynomial1D.
Parameters
----------
mapping : tuple
A tuple of (input_unit, output_unit) pairs, one per input, matched to the
        inputs by position. The first element of each pair is the unit that
the model will accept (specify ``dimensionless_unscaled``
to accept dimensionless input). The second element is the unit that the
model will return. Specify ``dimensionless_unscaled``
to return dimensionless Quantity, and `None` to return raw values without
Quantity.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : dict or bool, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like, optional
Free-form metadata to associate with this model.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D, UnitsMapping
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = UnitsMapping(((u.m, None),)) | poly
>>> model = model | UnitsMapping(((None, u.s),))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D, UnitsMapping
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly
>>> model = model | UnitsMapping(((None, u.s),))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
def __init__(
self,
mapping,
input_units_equivalencies=None,
input_units_allow_dimensionless=False,
name=None,
meta=None
):
self._mapping = mapping
none_mapping_count = len([m for m in mapping if m[-1] is None])
if none_mapping_count > 0 and none_mapping_count != len(mapping):
raise ValueError("If one return unit is None, then all must be None")
# These attributes are read and handled by Model
self._input_units_strict = True
self.input_units_equivalencies = input_units_equivalencies
self._input_units_allow_dimensionless = input_units_allow_dimensionless
super().__init__(name=name, meta=meta)
# Can't invoke this until after super().__init__, since
# we need self.inputs and self.outputs to be populated.
self._rebuild_units()
def _rebuild_units(self):
self._input_units = {input_name: input_unit for input_name, (input_unit, _) in zip(self.inputs, self.mapping)}
@property
def n_inputs(self):
return len(self._mapping)
@property
def n_outputs(self):
return len(self._mapping)
@property
def inputs(self):
return super().inputs
@inputs.setter
def inputs(self, value):
super(UnitsMapping, self.__class__).inputs.fset(self, value)
self._rebuild_units()
@property
def outputs(self):
return super().outputs
@outputs.setter
def outputs(self, value):
super(UnitsMapping, self.__class__).outputs.fset(self, value)
self._rebuild_units()
@property
def input_units(self):
return self._input_units
@property
def mapping(self):
return self._mapping
def evaluate(self, *args):
result = []
for arg, (_, return_unit) in zip(args, self.mapping):
if isinstance(arg, Quantity):
value = arg.value
else:
value = arg
if return_unit is None:
result.append(value)
else:
result.append(Quantity(value, return_unit, subok=True))
if self.n_outputs == 1:
return result[0]
else:
return tuple(result)
def __repr__(self):
if self.name is None:
return f"<UnitsMapping({self.mapping})>"
else:
return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
| bsd-3-clause | -1,022,625,556,190,745,300 | 32.236196 | 118 | 0.605261 | false |
blighj/django | tests/auth_tests/test_hashers.py | 21 | 23087 | from unittest import mock, skipUnless
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH,
BasePasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher,
check_password, get_hasher, identify_hasher, is_password_usable,
make_password,
)
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
try:
import crypt
except ImportError:
crypt = None
else:
# On some platforms (e.g. OpenBSD), crypt.crypt() always return None.
if crypt.crypt('', '') is None:
crypt = None
try:
import bcrypt
except ImportError:
bcrypt = None
try:
import argon2
except ImportError:
argon2 = None
class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
iterations = 1
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):
def test_simple(self):
encoded = make_password('lètmein')
self.assertTrue(encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
# Blank passwords
blank_encoded = make_password('')
self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
def test_pbkdf2(self):
encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256')
self.assertEqual(encoded, 'pbkdf2_sha256$100000$seasalt$BNZ6eyaNc8qFTJPjrAq99hSYb73EgAdytAtdBg2Sdcc=')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256')
self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
def test_sha1(self):
encoded = make_password('lètmein', 'seasalt', 'sha1')
self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'sha1')
self.assertTrue(blank_encoded.startswith('sha1$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.MD5PasswordHasher'])
def test_md5(self):
encoded = make_password('lètmein', 'seasalt', 'md5')
self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "md5")
# Blank passwords
blank_encoded = make_password('', 'seasalt', 'md5')
self.assertTrue(blank_encoded.startswith('md5$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedMD5PasswordHasher'])
def test_unsalted_md5(self):
encoded = make_password('lètmein', '', 'unsalted_md5')
self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
# Alternate unsalted syntax
alt_encoded = "md5$$%s" % encoded
self.assertTrue(is_password_usable(alt_encoded))
self.assertTrue(check_password('lètmein', alt_encoded))
self.assertFalse(check_password('lètmeinz', alt_encoded))
# Blank passwords
blank_encoded = make_password('', '', 'unsalted_md5')
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher'])
def test_unsalted_sha1(self):
encoded = make_password('lètmein', '', 'unsalted_sha1')
self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
# Raw SHA1 isn't acceptable
alt_encoded = encoded[6:]
self.assertFalse(check_password('lètmein', alt_encoded))
# Blank passwords
blank_encoded = make_password('', '', 'unsalted_sha1')
self.assertTrue(blank_encoded.startswith('sha1$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(crypt, "no crypt module to generate password.")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.CryptPasswordHasher'])
def test_crypt(self):
encoded = make_password('lètmei', 'ab', 'crypt')
self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(check_password('lètmei', encoded))
self.assertFalse(check_password('lètmeiz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
# Blank passwords
blank_encoded = make_password('', 'ab', 'crypt')
self.assertTrue(blank_encoded.startswith('crypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_sha256(self):
encoded = make_password('lètmein', hasher='bcrypt_sha256')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt_sha256$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
# password truncation no longer works
password = (
'VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN'
)
encoded = make_password(password, hasher='bcrypt_sha256')
self.assertTrue(check_password(password, encoded))
self.assertFalse(check_password(password[:72], encoded))
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt_sha256')
self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt(self):
encoded = make_password('lètmein', hasher='bcrypt')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('bcrypt$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
# Blank passwords
blank_encoded = make_password('', hasher='bcrypt')
self.assertTrue(blank_encoded.startswith('bcrypt$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_upgrade(self):
hasher = get_hasher('bcrypt')
self.assertEqual('bcrypt', hasher.algorithm)
self.assertNotEqual(hasher.rounds, 4)
old_rounds = hasher.rounds
try:
# Generate a password with 4 rounds.
hasher.rounds = 4
encoded = make_password('letmein', hasher='bcrypt')
rounds = hasher.safe_summary(encoded)['work factor']
self.assertEqual(rounds, '04')
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
# No upgrade is triggered.
self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
self.assertFalse(state['upgraded'])
# Revert to the old rounds count and ...
hasher.rounds = old_rounds
# ... check if the password would get updated to the new count.
self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
self.assertTrue(state['upgraded'])
finally:
hasher.rounds = old_rounds
@skipUnless(bcrypt, "bcrypt not installed")
def test_bcrypt_harden_runtime(self):
hasher = get_hasher('bcrypt')
self.assertEqual('bcrypt', hasher.algorithm)
with mock.patch.object(hasher, 'rounds', 4):
encoded = make_password('letmein', hasher='bcrypt')
with mock.patch.object(hasher, 'rounds', 6), \
mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
hasher.harden_runtime('wrong_password', encoded)
# Increasing rounds from 4 to 6 means an increase of 4 in workload,
# therefore hardening should run 3 times to make the timing the
# same (the original encode() call already ran once).
self.assertEqual(hasher.encode.call_count, 3)
# Get the original salt (includes the original workload factor)
algorithm, data = encoded.split('$', 1)
expected_call = (('wrong_password', force_bytes(data[:29])),)
self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3)
def test_unusable(self):
encoded = make_password(None)
self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
self.assertFalse(is_password_usable(encoded))
self.assertFalse(check_password(None, encoded))
self.assertFalse(check_password(encoded, encoded))
self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
self.assertFalse(check_password('', encoded))
self.assertFalse(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
with self.assertRaisesMessage(ValueError, 'Unknown password hashing algorith'):
identify_hasher(encoded)
# Assert that the unusable passwords actually contain a random part.
# This might fail one day due to a hash collision.
self.assertNotEqual(encoded, make_password(None), "Random password collision?")
def test_unspecified_password(self):
"""
Makes sure specifying no plain password with a valid encoded password
returns `False`.
"""
self.assertFalse(check_password(None, make_password('lètmein')))
def test_bad_algorithm(self):
msg = (
"Unknown password hashing algorithm '%s'. Did you specify it in "
"the PASSWORD_HASHERS setting?"
)
with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
make_password('lètmein', hasher='lolcat')
with self.assertRaisesMessage(ValueError, msg % 'lolcat'):
identify_hasher('lolcat$salt$hash')
def test_bad_encoded(self):
self.assertFalse(is_password_usable('lètmein_badencoded'))
self.assertFalse(is_password_usable(''))
def test_low_level_pbkdf2(self):
hasher = PBKDF2PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded, 'pbkdf2_sha256$100000$seasalt2$Tl4GMr+Yt1zzO1sbKoUaDBdds5NkR3RxaDWuQsliFrI=')
self.assertTrue(hasher.verify('lètmein', encoded))
def test_low_level_pbkdf2_sha1(self):
hasher = PBKDF2SHA1PasswordHasher()
encoded = hasher.encode('lètmein', 'seasalt2')
self.assertEqual(encoded, 'pbkdf2_sha1$100000$seasalt2$dK/dL+ySBZ5zoR0+Zk3SB/VsH0U=')
self.assertTrue(hasher.verify('lètmein', encoded))
@override_settings(
PASSWORD_HASHERS=[
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
],
)
def test_upgrade(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
for algo in ('sha1', 'md5'):
encoded = make_password('lètmein', hasher=algo)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
self.assertTrue(check_password('lètmein', encoded, setter))
self.assertTrue(state['upgraded'])
def test_no_upgrade(self):
encoded = make_password('lètmein')
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
@override_settings(
PASSWORD_HASHERS=[
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
],
)
def test_no_upgrade_on_incorrect_pass(self):
self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
for algo in ('sha1', 'md5'):
encoded = make_password('lètmein', hasher=algo)
state = {'upgraded': False}
def setter():
state['upgraded'] = True
self.assertFalse(check_password('WRONG', encoded, setter))
self.assertFalse(state['upgraded'])
def test_pbkdf2_upgrade(self):
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
self.assertNotEqual(hasher.iterations, 1)
old_iterations = hasher.iterations
try:
# Generate a password with 1 iteration.
hasher.iterations = 1
encoded = make_password('letmein')
algo, iterations, salt, hash = encoded.split('$', 3)
self.assertEqual(iterations, '1')
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
# No upgrade is triggered
self.assertTrue(check_password('letmein', encoded, setter))
self.assertFalse(state['upgraded'])
# Revert to the old iteration count and ...
hasher.iterations = old_iterations
# ... check if the password would get updated to the new iteration count.
self.assertTrue(check_password('letmein', encoded, setter))
self.assertTrue(state['upgraded'])
finally:
hasher.iterations = old_iterations
def test_pbkdf2_harden_runtime(self):
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
with mock.patch.object(hasher, 'iterations', 1):
encoded = make_password('letmein')
with mock.patch.object(hasher, 'iterations', 6), \
mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
hasher.harden_runtime('wrong_password', encoded)
# Encode should get called once ...
self.assertEqual(hasher.encode.call_count, 1)
# ... with the original salt and 5 iterations.
algorithm, iterations, salt, hash = encoded.split('$', 3)
expected_call = (('wrong_password', salt, 5),)
self.assertEqual(hasher.encode.call_args, expected_call)
def test_pbkdf2_upgrade_new_hasher(self):
hasher = get_hasher('default')
self.assertEqual('pbkdf2_sha256', hasher.algorithm)
self.assertNotEqual(hasher.iterations, 1)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
with self.settings(PASSWORD_HASHERS=[
'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
encoded = make_password('letmein')
algo, iterations, salt, hash = encoded.split('$', 3)
self.assertEqual(iterations, '1')
# No upgrade is triggered
self.assertTrue(check_password('letmein', encoded, setter))
self.assertFalse(state['upgraded'])
# Revert to the old iteration count and check if the password would get
# updated to the new iteration count.
with self.settings(PASSWORD_HASHERS=[
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
self.assertTrue(check_password('letmein', encoded, setter))
self.assertTrue(state['upgraded'])
def test_check_password_calls_harden_runtime(self):
hasher = get_hasher('default')
encoded = make_password('letmein')
with mock.patch.object(hasher, 'harden_runtime'), \
mock.patch.object(hasher, 'must_update', return_value=True):
# Correct password supplied, no hardening needed
check_password('letmein', encoded)
self.assertEqual(hasher.harden_runtime.call_count, 0)
# Wrong password supplied, hardening needed
check_password('wrong_password', encoded)
self.assertEqual(hasher.harden_runtime.call_count, 1)
def test_load_library_no_algorithm(self):
msg = "Hasher 'BasePasswordHasher' doesn't specify a library attribute"
with self.assertRaisesMessage(ValueError, msg):
BasePasswordHasher()._load_library()
def test_load_library_importerror(self):
PlainHasher = type('PlainHasher', (BasePasswordHasher,), {'algorithm': 'plain', 'library': 'plain'})
msg = "Couldn't load 'PlainHasher' algorithm library: No module named 'plain'"
with self.assertRaisesMessage(ValueError, msg):
PlainHasher()._load_library()
@skipUnless(argon2, "argon2-cffi not installed")
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPassArgon2(SimpleTestCase):
def test_argon2(self):
encoded = make_password('lètmein', hasher='argon2')
self.assertTrue(is_password_usable(encoded))
self.assertTrue(encoded.startswith('argon2$'))
self.assertTrue(check_password('lètmein', encoded))
self.assertFalse(check_password('lètmeinz', encoded))
self.assertEqual(identify_hasher(encoded).algorithm, 'argon2')
# Blank passwords
blank_encoded = make_password('', hasher='argon2')
self.assertTrue(blank_encoded.startswith('argon2$'))
self.assertTrue(is_password_usable(blank_encoded))
self.assertTrue(check_password('', blank_encoded))
self.assertFalse(check_password(' ', blank_encoded))
# Old hashes without version attribute
encoded = (
'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
'4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
)
self.assertTrue(check_password('secret', encoded))
self.assertFalse(check_password('wrong', encoded))
def test_argon2_upgrade(self):
self._test_argon2_upgrade('time_cost', 'time cost', 1)
self._test_argon2_upgrade('memory_cost', 'memory cost', 16)
self._test_argon2_upgrade('parallelism', 'parallelism', 1)
def test_argon2_version_upgrade(self):
hasher = get_hasher('argon2')
state = {'upgraded': False}
encoded = (
'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
'4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
)
def setter(password):
state['upgraded'] = True
old_m = hasher.memory_cost
old_t = hasher.time_cost
old_p = hasher.parallelism
try:
hasher.memory_cost = 8
hasher.time_cost = 1
hasher.parallelism = 1
self.assertTrue(check_password('secret', encoded, setter, 'argon2'))
self.assertTrue(state['upgraded'])
finally:
hasher.memory_cost = old_m
hasher.time_cost = old_t
hasher.parallelism = old_p
def _test_argon2_upgrade(self, attr, summary_key, new_value):
hasher = get_hasher('argon2')
self.assertEqual('argon2', hasher.algorithm)
self.assertNotEqual(getattr(hasher, attr), new_value)
old_value = getattr(hasher, attr)
try:
            # Generate hash with attr set to the new value
setattr(hasher, attr, new_value)
encoded = make_password('letmein', hasher='argon2')
attr_value = hasher.safe_summary(encoded)[summary_key]
self.assertEqual(attr_value, new_value)
state = {'upgraded': False}
def setter(password):
state['upgraded'] = True
# No upgrade is triggered.
self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
self.assertFalse(state['upgraded'])
# Revert to the old rounds count and ...
setattr(hasher, attr, old_value)
# ... check if the password would get updated to the new count.
self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
self.assertTrue(state['upgraded'])
finally:
setattr(hasher, attr, old_value)
| bsd-3-clause | 220,952,660,923,163,040 | 42.804183 | 111 | 0.64663 | false |
Floens/uchan | uchan/lib/plugin_manager.py | 1 | 1058 | import importlib
modules = []
def load_plugins(plugins, config_parser):
for plugin in plugins:
loaded_module = importlib.import_module('uchan.plugins.{}'.format(plugin))
add_module(loaded_module, plugin, config_parser)
def add_module(module, name, config_parser):
configuration = config_parser[name] if name in config_parser else None
info = execute_module_method(module, 'describe_plugin', False)
execute_module_method(module, 'on_enable', False, configuration)
# print('Loaded plugin {}: {}'.format(info['name'], info['description']))
modules.append(module)
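# A plugin module under uchan/plugins/ is expected to look roughly like the
# sketch below (only describe_plugin and on_enable are required by this manager;
# any other hook names depend on what callers pass to execute_hook):
#   def describe_plugin():
#       return {'name': 'example', 'description': 'Example plugin that does nothing'}
#   def on_enable(configuration):
#       pass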
def execute_hook(hook, *args, **kwargs):
for module in modules:
execute_module_method(module, hook, True, *args, **kwargs)
def execute_module_method(module, method_name, silent, *args, **kwargs):
try:
attr = getattr(module, method_name)
return attr(*args, **kwargs)
except AttributeError:
if not silent:
raise RuntimeError('The plugin {} must have the method {}'.format(module, method_name))
| mit | 7,098,008,039,008,940,000 | 32.0625 | 99 | 0.673913 | false |
andelf/pyqqweibo | setup.py | 1 | 1988 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 andelf <[email protected]>
# Time-stamp: <2011-06-06 16:08:30 andelf>
#from distutils.core import setup
from setuptools import setup, find_packages
import os, sys
lib_path = os.path.join(os.path.dirname(__file__))
sys.path.insert(0, lib_path)
from qqweibo import version
setup(name = "pyqqweibo",
version = version,
author = "andelf",
author_email = "[email protected]",
description = ("QQ weibo API SDK for python"),
license = "MIT",
keywords= "qq weibo library tencent microblog",
url="http://github.com/andelf/pyqqweibo",
packages = ['qqweibo'],
long_description = """
QQ weibo is a microblog service that is popular among Chinese.
      This is the SDK for QQ weibo, written by @andelf.
      * Fixes the bad official API names and arrangement.
* With model parser support, cache support.
* Under active development.
* Supports Python 2.6 - 2.7 and 3.1 - 3.2.
* Document & samples included.
* MIT license.
      NOTE: this is a third party SDK, use at your own risk.
""",
classifiers = [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Natural Language :: Chinese (Simplified)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Utilities"
],
zip_safe = True)
| mit | 3,269,890,709,477,016,000 | 33.275862 | 84 | 0.596076 | false |
Torben-D/open62541 | tools/pyUANamespace/ua_namespace.py | 1 | 30517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###
### Author: Chris Iatrou ([email protected])
### Version: rev 13
###
### This program was created for educational purposes and has been
### contributed to the open62541 project by the author. All licensing
### terms for this source is inherited by the terms and conditions
### specified for by the open62541 project (see the projects readme
### file for more information on the LGPL terms and restrictions).
###
### This program is not meant to be used in a production environment. The
### author is not liable for any complications arising due to the use of
### this program.
###
from __future__ import print_function
import sys
from time import struct_time, strftime, strptime, mktime
from struct import pack as structpack
import logging
from ua_builtin_types import *;
from ua_node_types import *;
from ua_constants import *;
from open62541_MacroHelper import open62541_MacroHelper
logger = logging.getLogger(__name__)
def getNextElementNode(xmlvalue):
if xmlvalue == None:
return None
xmlvalue = xmlvalue.nextSibling
while not xmlvalue == None and not xmlvalue.nodeType == xmlvalue.ELEMENT_NODE:
xmlvalue = xmlvalue.nextSibling
return xmlvalue
###
### Namespace Organizer
###
class opcua_namespace():
""" Class holding and managing a set of OPCUA nodes.
This class handles parsing XML description of namespaces, instantiating
nodes, linking references, graphing the namespace and compiling a binary
representation.
Note that nodes assigned to this class are not restricted to having a
single namespace ID. This class represents the entire physical address
space of the binary representation and all nodes that are to be included
in that segment of memory.
"""
nodes = []
nodeids = {}
aliases = {}
__linkLater__ = []
__binaryIndirectPointers__ = []
name = ""
knownNodeTypes = ""
  namespaceIdentifiers = {} # dict of 'int':'string' giving each namespace an array-mappable name
def __init__(self, name):
self.nodes = []
self.knownNodeTypes = ['variable', 'object', 'method', 'referencetype', \
'objecttype', 'variabletype', 'methodtype', \
'datatype', 'referencetype', 'aliases']
self.name = name
self.nodeids = {}
self.aliases = {}
self.namespaceIdentifiers = {}
self.__binaryIndirectPointers__ = []
def addNamespace(self, numericId, stringURL):
self.namespaceIdentifiers[numericId] = stringURL
def linkLater(self, pointer):
""" Called by nodes or references who have parsed an XML reference to a
node represented by a string.
No return value
XML String representations of references have the form 'i=xy' or
'ns=1;s="This unique Node"'. Since during the parsing of this attribute
only a subset of nodes are known/parsed, this reference string cannot be
linked when encountered.
References register themselves with the namespace to have their target
attribute (string) parsed by linkOpenPointers() when all nodes are
        created, so that target can be dereferenced and point to an actual node.
"""
self.__linkLater__.append(pointer)
def getUnlinkedPointers(self):
""" Return the list of references registered for linking during the next call
of linkOpenPointers()
"""
return self.__linkLater__
def unlinkedItemCount(self):
""" Returns the number of unlinked references that will be processed during
the next call of linkOpenPointers()
"""
return len(self.__linkLater__)
def buildAliasList(self, xmlelement):
""" Parses the <Alias> XML Element present in must XML NodeSet definitions.
No return value
        Contents of the Alias element are stored in a dictionary for further
dereferencing during pointer linkage (see linkOpenPointer()).
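        In a NodeSet file the element typically looks like this (sketch)::
          <Aliases>
            <Alias Alias="Boolean">i=1</Alias>
          </Aliases>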
"""
if not xmlelement.tagName == "Aliases":
logger.error("XMLElement passed is not an Aliaslist")
return
for al in xmlelement.childNodes:
if al.nodeType == al.ELEMENT_NODE:
if al.hasAttribute("Alias"):
aliasst = al.getAttribute("Alias")
if sys.version_info[0] < 3:
aliasnd = unicode(al.firstChild.data)
else:
aliasnd = al.firstChild.data
if not aliasst in self.aliases:
self.aliases[aliasst] = aliasnd
logger.debug("Added new alias \"" + str(aliasst) + "\" == \"" + str(aliasnd) + "\"")
else:
if self.aliases[aliasst] != aliasnd:
logger.error("Alias definitions for " + aliasst + " differ. Have " + self.aliases[aliasst] + " but XML defines " + aliasnd + ". Keeping current definition.")
def getNodeByBrowseName(self, idstring):
""" Returns the first node in the nodelist whose browseName matches idstring.
"""
matches = []
for n in self.nodes:
if idstring==str(n.browseName()):
matches.append(n)
if len(matches) > 1:
logger.error("Found multiple nodes with same ID!?")
if len(matches) == 0:
return None
else:
return matches[0]
def getNodeByIDString(self, idstring):
""" Returns the first node in the nodelist whose id string representation
matches idstring.
"""
matches = []
for n in self.nodes:
if idstring==str(n.id()):
matches.append(n)
if len(matches) > 1:
logger.error("Found multiple nodes with same ID!?")
if len(matches) == 0:
return None
else:
return matches[0]
def createNode(self, ndtype, xmlelement):
""" createNode is instantiates a node described by xmlelement, its type being
defined by the string ndtype.
No return value
If the xmlelement is an <Alias>, the contents will be parsed and stored
for later dereferencing during pointer linking (see linkOpenPointers).
Recognized types are:
* UAVariable
* UAObject
* UAMethod
* UAView
* UAVariableType
* UAObjectType
* UAMethodType
* UAReferenceType
* UADataType
For every recognized type, an appropriate node class is added to the node
list of the namespace. The NodeId of the given node is created and parsing
of the node attributes and elements is delegated to the parseXML() and
parseXMLSubType() functions of the instantiated class.
If the NodeID attribute is non-unique in the node list, the creation is
deferred and an error is logged.
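        A node element typically looks like this (sketch)::
          <UAObject NodeId="ns=1;i=5001" BrowseName="1:MyObject"> ... </UAObject>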
"""
if not isinstance(xmlelement, dom.Element):
logger.error( "Error: Can not create node from invalid XMLElement")
return
# An ID is mandatory for everything but aliases!
id = None
for idname in ['NodeId', 'NodeID', 'nodeid']:
if xmlelement.hasAttribute(idname):
id = xmlelement.getAttribute(idname)
if ndtype == 'aliases':
self.buildAliasList(xmlelement)
return
elif id == None:
logger.info( "Error: XMLElement has no id, node will not be created!")
return
else:
id = opcua_node_id_t(id)
if str(id) in self.nodeids:
# Normal behavior: Do not allow duplicates, first one wins
#logger.error( "XMLElement with duplicate ID " + str(id) + " found, node will not be created!")
#return
# Open62541 behavior for header generation: Replace the duplicate with the new node
logger.info( "XMLElement with duplicate ID " + str(id) + " found, node will be replaced!")
nd = self.getNodeByIDString(str(id))
self.nodes.remove(nd)
self.nodeids.pop(str(nd.id()))
node = None
if (ndtype == 'variable'):
node = opcua_node_variable_t(id, self)
elif (ndtype == 'object'):
node = opcua_node_object_t(id, self)
elif (ndtype == 'method'):
node = opcua_node_method_t(id, self)
elif (ndtype == 'objecttype'):
node = opcua_node_objectType_t(id, self)
elif (ndtype == 'variabletype'):
node = opcua_node_variableType_t(id, self)
elif (ndtype == 'methodtype'):
node = opcua_node_methodType_t(id, self)
elif (ndtype == 'datatype'):
node = opcua_node_dataType_t(id, self)
elif (ndtype == 'referencetype'):
node = opcua_node_referenceType_t(id, self)
else:
logger.error( "No node constructor for type " + ndtype)
if node != None:
node.parseXML(xmlelement)
self.nodes.append(node)
self.nodeids[str(node.id())] = node
def removeNodeById(self, nodeId):
nd = self.getNodeByIDString(nodeId)
if nd == None:
return False
logger.debug("Removing nodeId " + str(nodeId))
self.nodes.remove(nd)
if nd.getInverseReferences() != None:
for ref in nd.getInverseReferences():
src = ref.target();
src.removeReferenceToNode(nd)
return True
def registerBinaryIndirectPointer(self, node):
""" Appends a node to the list of nodes that should be contained in the
first 765 bytes (255 pointer slots a 3 bytes) in the binary
representation (indirect referencing space).
This function is reserved for references and dataType pointers.
"""
if not node in self.__binaryIndirectPointers__:
self.__binaryIndirectPointers__.append(node)
return self.__binaryIndirectPointers__.index(node)
def getBinaryIndirectPointerIndex(self, node):
""" Returns the slot/index of a pointer in the indirect referencing space
(first 765 Bytes) of the binary representation.
"""
if not node in self.__binaryIndirectPointers__:
return -1
return self.__binaryIndirectPointers__.index(node)
def parseXML(self, xmldoc):
""" Reads an XML Namespace definition and instantiates node.
No return value
        parseXML opens the file xmldoc using xml.dom.minidom and searches for
the first UANodeSet Element. For every Element encountered, createNode
is called to instantiate a node of the appropriate type.
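        Typical call sequence (sketch; the NodeSet file name is an example)::
          ns = opcua_namespace("myNamespace")
          ns.parseXML("Opc.Ua.NodeSet2.xml")
          ns.linkOpenPointers()
          ns.sanitize()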
"""
typedict = {}
UANodeSet = dom.parse(xmldoc).getElementsByTagName("UANodeSet")
if len(UANodeSet) == 0:
logger.error( "Error: No NodeSets found")
return
if len(UANodeSet) != 1:
logger.error( "Error: Found more than 1 Nodeset in XML File")
UANodeSet = UANodeSet[0]
for nd in UANodeSet.childNodes:
if nd.nodeType != nd.ELEMENT_NODE:
continue
ndType = nd.tagName.lower()
if ndType[:2] == "ua":
ndType = ndType[2:]
elif not ndType in self.knownNodeTypes:
logger.warn("XML Element or NodeType " + ndType + " is unknown and will be ignored")
continue
if not ndType in typedict:
typedict[ndType] = 1
else:
typedict[ndType] = typedict[ndType] + 1
self.createNode(ndType, nd)
logger.debug("Currently " + str(len(self.nodes)) + " nodes in address space. Type distribution for this run was: " + str(typedict))
def linkOpenPointers(self):
""" Substitutes symbolic NodeIds in references for actual node instances.
No return value
References that have registered themselves with linkLater() to have
        their symbolic NodeId targets ("ns=2; i=32") substituted for an actual
node will be iterated by this function. For each reference encountered
in the list of unlinked/open references, the target string will be
evaluated and searched for in the node list of this namespace. If found,
the target attribute of the reference will be substituted for the
found node.
If a reference fails to get linked, it will remain in the list of
unlinked references. The individual items in this list can be
retrieved using getUnlinkedPointers().
"""
linked = []
logger.debug( str(self.unlinkedItemCount()) + " pointers need to get linked.")
for l in self.__linkLater__:
targetLinked = False
if not l.target() == None and not isinstance(l.target(), opcua_node_t):
if isinstance(l.target(),str) or isinstance(l.target(),unicode):
# If is not a node ID, it should be an alias. Try replacing it
# with a proper node ID
if l.target() in self.aliases:
l.target(self.aliases[l.target()])
          # If the link is a node ID, try to find it, hoping that nobody has
          # defined more than one kind of id for that node
if l.target()[:2] == "i=" or l.target()[:2] == "g=" or \
l.target()[:2] == "b=" or l.target()[:2] == "s=" or \
l.target()[:3] == "ns=" :
tgt = self.getNodeByIDString(str(l.target()))
if tgt == None:
logger.error("Failed to link pointer to target (node not found) " + l.target())
else:
l.target(tgt)
targetLinked = True
else:
logger.error("Failed to link pointer to target (target not Alias or Node) " + l.target())
else:
logger.error("Failed to link pointer to target (don't know dummy type + " + str(type(l.target())) + " +) " + str(l.target()))
else:
logger.error("Pointer has null target: " + str(l))
referenceLinked = False
if not l.referenceType() == None:
if l.referenceType() in self.aliases:
l.referenceType(self.aliases[l.referenceType()])
tgt = self.getNodeByIDString(str(l.referenceType()))
if tgt == None:
logger.error("Failed to link reference type to target (node not found) " + l.referenceType())
else:
l.referenceType(tgt)
referenceLinked = True
else:
referenceLinked = True
if referenceLinked == True and targetLinked == True:
linked.append(l)
# References marked as "not forward" must be inverted (removed from source node, assigned to target node and relinked)
logger.warn("Inverting reference direction for all references with isForward==False attribute (is this correct!?)")
for n in self.nodes:
for r in n.getReferences():
if r.isForward() == False:
tgt = r.target()
if isinstance(tgt, opcua_node_t):
nref = opcua_referencePointer_t(n, parentNode=tgt)
nref.referenceType(r.referenceType())
tgt.addReference(nref)
# Create inverse references for all nodes
logger.debug("Updating all referencedBy fields in nodes for inverse lookups.")
for n in self.nodes:
n.updateInverseReferences()
for l in linked:
self.__linkLater__.remove(l)
if len(self.__linkLater__) != 0:
      logger.warn(str(len(self.__linkLater__)) + " references could not be linked.")
def sanitize(self):
remove = []
logger.debug("Sanitizing nodes and references...")
for n in self.nodes:
if n.sanitize() == False:
remove.append(n)
if not len(remove) == 0:
logger.warn(str(len(remove)) + " nodes will be removed because they failed sanitation.")
# FIXME: Some variable ns=0 nodes fail because they don't have DataType fields...
      # How should this be handled!?
logger.warn("Not actually removing nodes... it's unclear if this is valid or not")
def getRoot(self):
""" Returns the first node instance with the browseName "Root".
"""
return self.getNodeByBrowseName("Root")
def buildEncodingRules(self):
""" Calls buildEncoding() for all DataType nodes (opcua_node_dataType_t).
No return value
"""
stat = {True: 0, False: 0}
for n in self.nodes:
if isinstance(n, opcua_node_dataType_t):
n.buildEncoding()
stat[n.isEncodable()] = stat[n.isEncodable()] + 1
logger.debug("Type definitions built/passed: " + str(stat))
def allocateVariables(self):
for n in self.nodes:
if isinstance(n, opcua_node_variable_t):
n.allocateValue()
def printDot(self, filename="namespace.dot"):
""" Outputs a graphiz/dot description of all nodes in the namespace.
Output will written into filename to be parsed by dot/neato...
Note that for namespaces with more then 20 nodes the reference structure
will lead to a mostly illegible and huge graph. Use printDotGraphWalk()
for plotting specific portions of a large namespace.
"""
file=open(filename, 'w+')
file.write("digraph ns {\n")
for n in self.nodes:
file.write(n.printDot())
file.write("}\n")
file.close()
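  # Usage sketch (assumed, not part of the original file): after calling
  #   ns.printDot("namespace.dot")
  # the graph can be rendered with the standard graphviz tools, e.g.
  #   dot -Tpng namespace.dot -o namespace.png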
def getSubTypesOf(self, tdNodes = None, currentNode = None, hasSubtypeRefNode = None):
# If this is a toplevel call, collect the following information as defaults
if tdNodes == None:
tdNodes = []
if currentNode == None:
currentNode = self.getNodeByBrowseName("HasTypeDefinition")
tdNodes.append(currentNode)
if len(tdNodes) < 1:
return []
if hasSubtypeRefNode == None:
hasSubtypeRefNode = self.getNodeByBrowseName("HasSubtype")
if hasSubtypeRefNode == None:
return tdNodes
# collect all subtypes of this node
for ref in currentNode.getReferences():
if ref.isForward() and ref.referenceType().id() == hasSubtypeRefNode.id():
tdNodes.append(ref.target())
self.getTypeDefinitionNodes(tdNodes=tdNodes, currentNode = ref.target(), hasSubtypeRefNode=hasSubtypeRefNode)
return tdNodes
def printDotGraphWalk(self, depth=1, filename="out.dot", rootNode=None, followInverse = False, excludeNodeIds=[]):
""" Outputs a graphiz/dot description the nodes centered around rootNode.
References beginning from rootNode will be followed for depth steps. If
"followInverse = True" is passed, then inverse (not Forward) references
will also be followed.
Nodes can be excluded from the graph by passing a list of NodeIds as
string representation using excludeNodeIds (ex ["i=53", "ns=2;i=453"]).
        Output is written into filename to be parsed by dot/neato/sfdp...
"""
iter = depth
processed = []
if rootNode == None or \
not isinstance(rootNode, opcua_node_t) or \
not rootNode in self.nodes:
root = self.getRoot()
else:
root = rootNode
file=open(filename, 'w+')
if root == None:
return
file.write("digraph ns {\n")
file.write(root.printDot())
refs=[]
if followInverse == True:
refs = root.getReferences(); # + root.getInverseReferences()
else:
for ref in root.getReferences():
if ref.isForward():
refs.append(ref)
while iter > 0:
tmp = []
for ref in refs:
if isinstance(ref.target(), opcua_node_t):
tgt = ref.target()
if not str(tgt.id()) in excludeNodeIds:
if not tgt in processed:
file.write(tgt.printDot())
processed.append(tgt)
if ref.isForward() == False and followInverse == True:
tmp = tmp + tgt.getReferences(); # + tgt.getInverseReferences()
elif ref.isForward() == True :
tmp = tmp + tgt.getReferences();
refs = tmp
iter = iter - 1
file.write("}\n")
file.close()
def __reorder_getMinWeightNode__(self, nmatrix):
rcind = -1
rind = -1
minweight = -1
minweightnd = None
for row in nmatrix:
rcind += 1
if row[0] == None:
continue
w = sum(row[1:])
if minweight < 0:
rind = rcind
minweight = w
minweightnd = row[0]
elif w < minweight:
rind = rcind
minweight = w
minweightnd = row[0]
return (rind, minweightnd, minweight)
def reorderNodesMinDependencies(self):
    # create a matrix representation of all nodes
#
nmatrix = []
for n in range(0,len(self.nodes)):
nmatrix.append([None] + [0]*len(self.nodes))
typeRefs = []
tn = self.getNodeByBrowseName("HasTypeDefinition")
if tn != None:
typeRefs.append(tn)
typeRefs = typeRefs + self.getSubTypesOf(currentNode=tn)
subTypeRefs = []
tn = self.getNodeByBrowseName("HasSubtype")
if tn != None:
subTypeRefs.append(tn)
subTypeRefs = subTypeRefs + self.getSubTypesOf(currentNode=tn)
logger.debug("Building connectivity matrix for node order optimization.")
# Set column 0 to contain the node
for node in self.nodes:
nind = self.nodes.index(node)
nmatrix[nind][0] = node
# Determine the dependencies of all nodes
for node in self.nodes:
nind = self.nodes.index(node)
#print "Examining node " + str(nind) + " " + str(node)
for ref in node.getReferences():
if isinstance(ref.target(), opcua_node_t):
tind = self.nodes.index(ref.target())
# Typedefinition of this node has precedence over this node
if ref.referenceType() in typeRefs and ref.isForward():
nmatrix[nind][tind+1] += 1
# isSubTypeOf/typeDefinition of this node has precedence over this node
elif ref.referenceType() in subTypeRefs and not ref.isForward():
nmatrix[nind][tind+1] += 1
# Else the target depends on us
elif ref.isForward():
nmatrix[tind][nind+1] += 1
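    # Reading the matrix (summary of the logic above): nmatrix[i][0] holds node i and
    # nmatrix[i][j+1] counts outstanding dependencies of node i on node j; the row with
    # the smallest weight is emitted next and the counters of rows that depended on the
    # emitted node are decremented in the loop below.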
logger.debug("Using Djikstra topological sorting to determine printing order.")
reorder = []
while len(reorder) < len(self.nodes):
(nind, node, w) = self.__reorder_getMinWeightNode__(nmatrix)
#print str(100*float(len(reorder))/len(self.nodes)) + "% " + str(w) + " " + str(node) + " " + str(node.browseName())
reorder.append(node)
for ref in node.getReferences():
if isinstance(ref.target(), opcua_node_t):
tind = self.nodes.index(ref.target())
if ref.referenceType() in typeRefs and ref.isForward():
nmatrix[nind][tind+1] -= 1
elif ref.referenceType() in subTypeRefs and not ref.isForward():
nmatrix[nind][tind+1] -= 1
elif ref.isForward():
nmatrix[tind][nind+1] -= 1
nmatrix[nind][0] = None
self.nodes = reorder
logger.debug("Nodes reordered.")
return
def printOpen62541Header(self, printedExternally=[], supressGenerationOfAttribute=[], outfilename=""):
unPrintedNodes = []
unPrintedRefs = []
code = []
header = []
# Reorder our nodes to produce a bare minimum of bootstrapping dependencies
logger.debug("Reordering nodes for minimal dependencies during printing.")
self.reorderNodesMinDependencies()
# Some macros (UA_EXPANDEDNODEID_MACRO()...) are easily created, but
# bulky. This class will help to offload some code.
codegen = open62541_MacroHelper(supressGenerationOfAttribute=supressGenerationOfAttribute)
# Populate the unPrinted-Lists with everything we have.
    # Every time a node's print function is called, it will pop itself and
# all printed references from these lists.
for n in self.nodes:
if not n in printedExternally:
unPrintedNodes.append(n)
else:
logger.debug("Node " + str(n.id()) + " is being ignored.")
for n in unPrintedNodes:
for r in n.getReferences():
if (r.target() != None) and (r.target().id() != None) and (r.parent() != None):
unPrintedRefs.append(r)
    logger.debug(str(len(unPrintedNodes)) + " Nodes, " + str(len(unPrintedRefs)) + " References need to get printed.")
header.append("/* WARNING: This is a generated file.\n * Any manual changes will be overwritten.\n\n */")
code.append("/* WARNING: This is a generated file.\n * Any manual changes will be overwritten.\n\n */")
header.append('#ifndef '+outfilename.upper()+'_H_')
header.append('#define '+outfilename.upper()+'_H_')
header.append('#ifdef UA_NO_AMALGAMATION')
header.append('#include "server/ua_server_internal.h"')
header.append('#include "server/ua_nodes.h"')
header.append('#include "ua_util.h"')
header.append('#include "ua_types.h"')
header.append('#include "ua_types_encoding_binary.h"')
header.append('#include "ua_types_generated_encoding_binary.h"')
header.append('#include "ua_transport_generated_encoding_binary.h"')
header.append('#else')
header.append('#include "open62541.h"')
header.append('#define NULL ((void *)0)')
header.append('#endif')
code.append('#include "'+outfilename+'.h"')
code.append("UA_INLINE void "+outfilename+"(UA_Server *server) {")
# Before printing nodes, we need to request additional namespace arrays from the server
for nsid in self.namespaceIdentifiers:
if nsid == 0 or nsid==1:
continue
else:
name = self.namespaceIdentifiers[nsid]
name = name.replace("\"","\\\"")
code.append("UA_Server_addNamespace(server, \"" + name + "\");")
# Find all references necessary to create the namespace and
# "Bootstrap" them so all other nodes can safely use these referencetypes whenever
# they can locate both source and target of the reference.
logger.debug("Collecting all references used in the namespace.")
refsUsed = []
for n in self.nodes:
# Since we are already looping over all nodes, use this chance to print NodeId defines
if n.id().ns != 0:
nc = n.nodeClass()
if nc != NODE_CLASS_OBJECT and nc != NODE_CLASS_VARIABLE and nc != NODE_CLASS_VIEW:
header = header + codegen.getNodeIdDefineString(n)
# Now for the actual references...
for r in n.getReferences():
        # Only print valid references in namespace 0 (users will not want their refs bootstrapped)
if not r.referenceType() in refsUsed and r.referenceType() != None and r.referenceType().id().ns == 0:
refsUsed.append(r.referenceType())
logger.debug(str(len(refsUsed)) + " reference types are used in the namespace, which will now get bootstrapped.")
for r in refsUsed:
code = code + r.printOpen62541CCode(unPrintedNodes, unPrintedRefs);
header.append("extern void "+outfilename+"(UA_Server *server);\n")
header.append("#endif /* "+outfilename.upper()+"_H_ */")
# Note to self: do NOT - NOT! - try to iterate over unPrintedNodes!
# Nodes remove themselves from this list when printed.
logger.debug("Printing all other nodes.")
for n in self.nodes:
code = code + n.printOpen62541CCode(unPrintedNodes, unPrintedRefs, supressGenerationOfAttribute=supressGenerationOfAttribute)
if len(unPrintedNodes) != 0:
logger.warn("" + str(len(unPrintedNodes)) + " nodes could not be translated to code.")
else:
logger.debug("Printing suceeded for all nodes")
if len(unPrintedRefs) != 0:
logger.debug("Attempting to print " + str(len(unPrintedRefs)) + " unprinted references.")
tmprefs = []
for r in unPrintedRefs:
if not (r.target() not in unPrintedNodes) and not (r.parent() in unPrintedNodes):
if not isinstance(r.parent(), opcua_node_t):
logger.debug("Reference has no parent!")
elif not isinstance(r.parent().id(), opcua_node_id_t):
logger.debug("Parents nodeid is not a nodeID!")
else:
if (len(tmprefs) == 0):
code.append("// Creating leftover references:")
code = code + codegen.getCreateStandaloneReference(r.parent(), r)
code.append("")
tmprefs.append(r)
# Remove printed refs from list
for r in tmprefs:
unPrintedRefs.remove(r)
if len(unPrintedRefs) != 0:
logger.warn("" + str(len(unPrintedRefs)) + " references could not be translated to code.")
else:
logger.debug("Printing succeeded for all references")
code.append("}")
return (header,code)
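  # Minimal usage sketch (assumed, not part of the original file): generate both
  # artifacts and write them out, e.g.
  #   (hdr, src) = ns.printOpen62541Header(outfilename="nodeset")
  #   open("nodeset.h", "w+").write("\n".join(hdr))
  #   open("nodeset.c", "w+").write("\n".join(src))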
###
### Testing
###
class testing:
def __init__(self):
self.namespace = opcua_namespace("testing")
logger.debug("Phase 1: Reading XML file nodessets")
self.namespace.parseXML("Opc.Ua.NodeSet2.xml")
#self.namespace.parseXML("Opc.Ua.NodeSet2.Part4.xml")
#self.namespace.parseXML("Opc.Ua.NodeSet2.Part5.xml")
#self.namespace.parseXML("Opc.Ua.SimulationNodeSet2.xml")
logger.debug("Phase 2: Linking address space references and datatypes")
self.namespace.linkOpenPointers()
self.namespace.sanitize()
logger.debug("Phase 3: Comprehending DataType encoding rules")
self.namespace.buildEncodingRules()
logger.debug("Phase 4: Allocating variable value data")
self.namespace.allocateVariables()
bin = self.namespace.buildBinary()
f = open("binary.base64","w+")
f.write(bin.encode("base64"))
f.close()
allnodes = self.namespace.nodes;
ns = [self.namespace.getRoot()]
i = 0
#print "Starting depth search on " + str(len(allnodes)) + " nodes starting with from " + str(ns)
while (len(ns) < len(allnodes)):
i = i + 1;
tmp = [];
print("Iteration: " + str(i))
for n in ns:
tmp.append(n)
for r in n.getReferences():
if (not r.target() in tmp):
tmp.append(r.target())
print("...tmp, " + str(len(tmp)) + " nodes discovered")
ns = []
for n in tmp:
ns.append(n)
print("...done, " + str(len(ns)) + " nodes discovered")
logger.debug("Phase 5: Printing pretty graph")
self.namespace.printDotGraphWalk(depth=1, rootNode=self.namespace.getNodeByIDString("i=84"), followInverse=False, excludeNodeIds=["i=29", "i=22", "i=25"])
#self.namespace.printDot()
class testing_open62541_header:
def __init__(self):
self.namespace = opcua_namespace("testing")
logger.debug("Phase 1: Reading XML file nodessets")
self.namespace.parseXML("Opc.Ua.NodeSet2.xml")
#self.namespace.parseXML("Opc.Ua.NodeSet2.Part4.xml")
#self.namespace.parseXML("Opc.Ua.NodeSet2.Part5.xml")
#self.namespace.parseXML("Opc.Ua.SimulationNodeSet2.xml")
logger.debug("Phase 2: Linking address space references and datatypes")
self.namespace.linkOpenPointers()
self.namespace.sanitize()
logger.debug("Phase 3: Calling C Printers")
    (header, code) = self.namespace.printOpen62541Header()
    codeout = open("./open62541_namespace.c", "w+")
    for line in header + code:
      codeout.write(line + "\n")
codeout.close()
return
# Call testing routine if invoked standalone.
# For better debugging, it is advised to import this file using an interactive
# python shell and instantiating a namespace.
#
# import ua_types.py as ua; ns=ua.testing().namespace
if __name__ == '__main__':
tst = testing_open62541_header()
| lgpl-3.0 | 1,352,593,816,611,357,700 | 36.582512 | 171 | 0.643772 | false |
zstackio/zstack-woodpecker | integrationtest/vm/monitor/5min_alert_host_all_cpus_idle.py | 2 | 2657 | '''
Test the monitor trigger on the all-host CPU idle ratio over five minutes
@author: Songtao,Haochen
'''
import os
import test_stub
import random
import time
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.monitor_operations as mon_ops
def test():
global trigger
global media
global trigger_action
test_item = "host.cpu.util"
resource_type="HostVO"
host_monitor_item = test_stub.get_monitor_item(resource_type)
if test_item not in host_monitor_item:
test_util.test_fail('%s is not available for monitor' % test_item)
hosts = res_ops.get_resource(res_ops.HOST)
host = hosts[0]
duration = 300
expression = "host.cpu.util{cpu=-1,type=\"idle\"}<40.3"
monitor_trigger = mon_ops.create_monitor_trigger(host.uuid, duration, expression)
send_email = test_stub.create_email_media()
media = send_email.uuid
trigger_action_name = "trigger"+ ''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(8)))
trigger = monitor_trigger.uuid
receive_email = os.environ.get('receive_email')
monitor_trigger_action = mon_ops.create_email_monitor_trigger_action(trigger_action_name, send_email.uuid, trigger.split(), receive_email)
trigger_action = monitor_trigger_action.uuid
host.password = os.environ.get('hostPassword')
ssh_cmd = test_stub.ssh_cmd_line(host.managementIp, host.username, host.password, port=int(host.sshPort))
for i in range(4):test_stub.run_all_cpus_load(ssh_cmd)
status_problem, status_ok = test_stub.query_trigger_in_loop(trigger,80)
test_util.action_logger('Trigger old status: %s triggered. Trigger new status: %s recovered' % (status_problem, status_ok ))
if status_problem != 1 or status_ok != 1:
test_util.test_fail('%s Monitor Test failed, expected Problem or OK status not triggered' % test_item)
mail_list = test_stub.receive_email()
keywords = "fired"
mail_flag = test_stub.check_email(mail_list, keywords, trigger, host.uuid)
if mail_flag == 0:
        test_util.test_fail('Failed to Get Target: %s for: %s Trigger Mail' % (host.uuid, test_item))
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media)
def error_cleanup():
global trigger
global media
global trigger_action
mon_ops.delete_monitor_trigger_action(trigger_action)
mon_ops.delete_monitor_trigger(trigger)
mon_ops.delete_email_media(media) | apache-2.0 | 5,195,039,974,565,042,000 | 37.102941 | 142 | 0.694392 | false |
onshape-public/onshape-clients | python/onshape_client/oas/api/revisions_api.py | 1 | 24839 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
# python 2 and python 3 compatibility library
import six
from onshape_client.oas.api_client import ApiClient
from onshape_client.oas.exceptions import ApiTypeError, ApiValueError
from onshape_client.oas.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
int,
none_type,
str,
validate_and_convert_types,
)
from onshape_client.oas.models import bt_list_response_bt_revision_info
from onshape_client.oas.models import bt_revision_info
class RevisionsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __enumerate_revisions(self, cid, **kwargs):
"""enumerate_revisions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enumerate_revisions(cid, async_req=True)
>>> result = thread.get()
Args:
cid (str):
Keyword Args:
element_type (int): [optional]
limit (int): [optional] if omitted the server will use the default value of 20
offset (int): [optional] if omitted the server will use the default value of 0
latest_only (bool): [optional] if omitted the server will use the default value of False
after (int): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_list_response_bt_revision_info.BTListResponseBTRevisionInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["cid"] = cid
return self.call_with_http_info(**kwargs)
self.enumerate_revisions = Endpoint(
settings={
"response_type": (
bt_list_response_bt_revision_info.BTListResponseBTRevisionInfo,
),
"auth": ["OAuth2"],
"endpoint_path": "/api/revisions/companies/{cid}",
"operation_id": "enumerate_revisions",
"http_method": "GET",
"servers": [],
},
params_map={
"all": [
"cid",
"element_type",
"limit",
"offset",
"latest_only",
"after",
],
"required": ["cid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"cid": (str,),
"element_type": (int,),
"limit": (int,),
"offset": (int,),
"latest_only": (bool,),
"after": (int,),
},
"attribute_map": {
"cid": "cid",
"element_type": "elementType",
"limit": "limit",
"offset": "offset",
"latest_only": "latestOnly",
"after": "after",
},
"location_map": {
"cid": "path",
"element_type": "query",
"limit": "query",
"offset": "query",
"latest_only": "query",
"after": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__enumerate_revisions,
)
def __get_latest_in_document_or_company(self, cd, cdid, pnum, **kwargs):
"""get_latest_in_document_or_company # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_latest_in_document_or_company(cd, cdid, pnum, async_req=True)
>>> result = thread.get()
Args:
cd (str):
cdid (str):
pnum (str):
Keyword Args:
et (str): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_revision_info.BTRevisionInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["cd"] = cd
kwargs["cdid"] = cdid
kwargs["pnum"] = pnum
return self.call_with_http_info(**kwargs)
self.get_latest_in_document_or_company = Endpoint(
settings={
"response_type": (bt_revision_info.BTRevisionInfo,),
"auth": ["OAuth2"],
"endpoint_path": "/api/revisions/{cd}/{cdid}/p/{pnum}/latest",
"operation_id": "get_latest_in_document_or_company",
"http_method": "GET",
"servers": [],
},
params_map={
"all": ["cd", "cdid", "pnum", "et",],
"required": ["cd", "cdid", "pnum",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"cd": (str,),
"cdid": (str,),
"pnum": (str,),
"et": (str,),
},
"attribute_map": {
"cd": "cd",
"cdid": "cdid",
"pnum": "pnum",
"et": "et",
},
"location_map": {
"cd": "path",
"cdid": "path",
"pnum": "path",
"et": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__get_latest_in_document_or_company,
)
def __get_revision_history_in_company(self, cid, pnum, **kwargs):
"""get_revision_history_in_company # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_revision_history_in_company(cid, pnum, async_req=True)
>>> result = thread.get()
Args:
cid (str):
pnum (str):
Keyword Args:
element_type (str): [optional]
fill_approvers (bool): [optional] if omitted the server will use the default value of False
fill_export_permission (bool): [optional] if omitted the server will use the default value of False
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_list_response_bt_revision_info.BTListResponseBTRevisionInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["cid"] = cid
kwargs["pnum"] = pnum
return self.call_with_http_info(**kwargs)
self.get_revision_history_in_company = Endpoint(
settings={
"response_type": (
bt_list_response_bt_revision_info.BTListResponseBTRevisionInfo,
),
"auth": ["OAuth2"],
"endpoint_path": "/api/revisions/companies/{cid}/partnumber/{pnum}",
"operation_id": "get_revision_history_in_company",
"http_method": "GET",
"servers": [],
},
params_map={
"all": [
"cid",
"pnum",
"element_type",
"fill_approvers",
"fill_export_permission",
],
"required": ["cid", "pnum",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"cid": (str,),
"pnum": (str,),
"element_type": (str,),
"fill_approvers": (bool,),
"fill_export_permission": (bool,),
},
"attribute_map": {
"cid": "cid",
"pnum": "pnum",
"element_type": "elementType",
"fill_approvers": "fillApprovers",
"fill_export_permission": "fillExportPermission",
},
"location_map": {
"cid": "path",
"pnum": "path",
"element_type": "query",
"fill_approvers": "query",
"fill_export_permission": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__get_revision_history_in_company,
)
class Endpoint(object):
def __init__(
self,
settings=None,
params_map=None,
root_map=None,
headers_map=None,
api_client=None,
callable=None,
):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map["all"].extend(
[
"async_req",
"_host_index",
"_preload_content",
"_request_timeout",
"_return_http_data_only",
"_check_input_type",
"_check_return_type",
]
)
self.params_map["nullable"].extend(["_request_timeout"])
self.validations = root_map["validations"]
self.allowed_values = root_map["allowed_values"]
self.openapi_types = root_map["openapi_types"]
extra_types = {
"async_req": (bool,),
"_host_index": (int,),
"_preload_content": (bool,),
"_request_timeout": (none_type, int, (int,), [int]),
"_return_http_data_only": (bool,),
"_check_input_type": (bool,),
"_check_return_type": (bool,),
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map["attribute_map"]
self.location_map = root_map["location_map"]
self.collection_format_map = root_map["collection_format_map"]
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map["enum"]:
if param in kwargs:
check_allowed_values(self.allowed_values, (param,), kwargs[param])
for param in self.params_map["validation"]:
if param in kwargs:
check_validations(self.validations, (param,), kwargs[param])
if kwargs["_check_input_type"] is False:
return
for key, value in six.iteritems(kwargs):
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
False,
kwargs["_check_input_type"],
configuration=self.api_client.configuration,
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
"body": None,
"collection_format": {},
"file": {},
"form": [],
"header": {},
"path": {},
"query": [],
}
for param_name, param_value in six.iteritems(kwargs):
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == "body":
params["body"] = param_value
continue
base_name = self.attribute_map[param_name]
if param_location == "form" and self.openapi_types[param_name] == (
file_type,
):
params["file"][param_name] = [param_value]
elif param_location == "form" and self.openapi_types[param_name] == (
[file_type],
):
# param_value is already a list
params["file"][param_name] = param_value
elif param_location in {"form", "query"}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {"form", "query"}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params["collection_format"][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
pet_api = PetApi()
pet_api.add_pet # this is an instance of the class Endpoint
pet_api.add_pet() # this invokes pet_api.add_pet.__call__()
which then invokes the callable functions stored in that endpoint at
pet_api.add_pet.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
_host = self.settings["servers"][kwargs["_host_index"]]
except IndexError:
if self.settings["servers"]:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s"
% len(self.settings["servers"])
)
_host = None
for key, value in six.iteritems(kwargs):
if key not in self.params_map["all"]:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" % (key, self.settings["operation_id"])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (
key not in self.params_map["nullable"]
and value is None
and kwargs["_check_input_type"] is False
):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" % (key, self.settings["operation_id"])
)
for key in self.params_map["required"]:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings["operation_id"])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map["accept"]
if accept_headers_list:
params["header"]["Accept"] = self.api_client.select_header_accept(
accept_headers_list
)
content_type_headers_list = self.headers_map["content_type"]
if content_type_headers_list:
header_list = self.api_client.select_header_content_type(
content_type_headers_list
)
params["header"]["Content-Type"] = header_list
return self.api_client.call_api(
self.settings["endpoint_path"],
self.settings["http_method"],
params["path"],
params["query"],
params["header"],
body=params["body"],
post_params=params["form"],
files=params["file"],
response_type=self.settings["response_type"],
auth_settings=self.settings["auth"],
async_req=kwargs["async_req"],
_check_type=kwargs["_check_return_type"],
_return_http_data_only=kwargs["_return_http_data_only"],
_preload_content=kwargs["_preload_content"],
_request_timeout=kwargs["_request_timeout"],
_host=_host,
collection_formats=params["collection_format"],
)
| mit | -5,475,035,089,580,236,000 | 40.056198 | 115 | 0.494424 | false |
2coding/Codec | thirdparty/gtest-1.6.0/test/gtest_test_utils.py | 5 | 10742 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
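# Example (illustrative; assumes a test binary of that name exists in the build
# directory):
#   path = GetTestExecutablePath('gtest_output_test_')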
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a tuple whose first element is the child's
      # output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest:
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
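# Usage sketch (not part of the original file): run a child process and inspect
# the outcome attributes documented above.
#   p = Subprocess(['python', '--version'], capture_stderr=True)
#   if p.exited and p.exit_code == 0:
#     print p.output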
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| bsd-3-clause | -5,137,043,881,937,884,000 | 33.219672 | 79 | 0.653696 | false |
toshywoshy/ansible | lib/ansible/modules/network/cloudengine/ce_sflow.py | 11 | 47224 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_sflow
version_added: "2.4"
short_description: Manages sFlow configuration on HUAWEI CloudEngine switches.
description:
- Configure Sampled Flow (sFlow) to monitor traffic on an interface in real time,
detect abnormal traffic, and locate the source of attack traffic,
ensuring stable running of the network.
author: QijunPan (@QijunPan)
notes:
- This module requires the netconf system service be enabled on the remote device being managed.
- Recommended connection is C(netconf).
- This module also works with C(local) connections for legacy playbooks.
options:
agent_ip:
description:
- Specifies the IPv4/IPv6 address of an sFlow agent.
source_ip:
description:
- Specifies the source IPv4/IPv6 address of sFlow packets.
collector_id:
description:
- Specifies the ID of an sFlow collector. This ID is used when you specify
the collector in subsequent sFlow configuration.
choices: ['1', '2']
collector_ip:
description:
- Specifies the IPv4/IPv6 address of the sFlow collector.
collector_ip_vpn:
description:
- Specifies the name of a VPN instance.
The value is a string of 1 to 31 case-sensitive characters, spaces not supported.
When double quotation marks are used around the string, spaces are allowed in the string.
The value C(_public_) is reserved and cannot be used as the VPN instance name.
collector_datagram_size:
description:
- Specifies the maximum length of sFlow packets sent from an sFlow agent to an sFlow collector.
The value is an integer, in bytes. It ranges from 1024 to 8100. The default value is 1400.
collector_udp_port:
description:
- Specifies the UDP destination port number of sFlow packets.
The value is an integer that ranges from 1 to 65535. The default value is 6343.
collector_meth:
description:
- Configures the device to send sFlow packets through service interfaces,
enhancing the sFlow packet forwarding capability.
The enhanced parameter is optional. No matter whether you configure the enhanced mode,
the switch determines to send sFlow packets through service cards or management port
based on the routing information on the collector.
When the value is meth, the device forwards sFlow packets at the control plane.
When the value is enhanced, the device forwards sFlow packets at the forwarding plane to
enhance the sFlow packet forwarding capacity.
choices: ['meth', 'enhanced']
collector_description:
description:
- Specifies the description of an sFlow collector.
The value is a string of 1 to 255 case-sensitive characters without spaces.
sflow_interface:
description:
- Full name of interface for Flow Sampling or Counter.
It must be a physical interface, Eth-Trunk, or Layer 2 subinterface.
sample_collector:
description:
- Indicates the ID list of the collector.
sample_rate:
description:
- Specifies the flow sampling rate in the format 1/rate.
The value is an integer and ranges from 1 to 4294967295. The default value is 8192.
sample_length:
description:
- Specifies the maximum length of sampled packets.
The value is an integer and ranges from 18 to 512, in bytes. The default value is 128.
sample_direction:
description:
- Enables flow sampling in the inbound or outbound direction.
choices: ['inbound', 'outbound', 'both']
counter_collector:
description:
- Indicates the ID list of the counter collector.
counter_interval:
description:
- Indicates the counter sampling interval.
The value is an integer that ranges from 10 to 4294967295, in seconds. The default value is 20.
export_route:
description:
- Configures the sFlow packets sent by the switch not to carry routing information.
choices: ['enable', 'disable']
rate_limit:
description:
- Specifies the rate of sFlow packets sent from a card to the control plane.
The value is an integer that ranges from 100 to 1500, in pps.
version_added: "2.10"
type: str
rate_limit_slot:
description:
- Specifies the slot where the rate of output sFlow packets is limited.
If this parameter is not specified, the rate of sFlow packets sent from
all cards to the control plane is limited.
The value is an integer or a string of characters.
version_added: "2.10"
type: str
forward_enp_slot:
description:
- Enable the Embedded Network Processor (ENP) chip function.
The switch uses the ENP chip to perform sFlow sampling,
and the maximum sFlow sampling interval is 65535.
If you set the sampling interval to be larger than 65535,
the switch automatically restores it to 65535.
The value is an integer or 'all'.
version_added: "2.10"
type: str
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = '''
---
- name: sflow module test
hosts: ce128
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configuring sFlow Agent
ce_sflow:
agent_ip: 6.6.6.6
provider: '{{ cli }}'
- name: Configuring sFlow Collector
ce_sflow:
collector_id: 1
collector_ip: 7.7.7.7
collector_ip_vpn: vpn1
collector_description: Collector1
provider: '{{ cli }}'
- name: Configure flow sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
sample_collector: 1
sample_direction: inbound
provider: '{{ cli }}'
- name: Configure counter sampling.
ce_sflow:
sflow_interface: 10GE2/0/2
counter_collector: 1
counter_interval: 1000
provider: '{{ cli }}'
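  # Illustrative extra task (not part of the original examples); the option values
  # are assumptions chosen within the documented ranges.
  - name: Configure export route and rate limit.
    ce_sflow:
      export_route: disable
      rate_limit: 1000
      rate_limit_slot: 2
      provider: '{{ cli }}'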
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"agent_ip": "6.6.6.6", "state": "present"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"agent": {}}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"agent": {"family": "ipv4", "ipv4Addr": "1.2.3.4", "ipv6Addr": null}}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["sflow agent ip 6.6.6.6"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
CE_NC_GET_SFLOW = """
<filter type="subtree">
<sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<sources>
<source>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</source>
</sources>
<agents>
<agent>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
</agent>
</agents>
<collectors>
<collector>
<collectorID></collectorID>
<family></family>
<ipv4Addr></ipv4Addr>
<ipv6Addr></ipv6Addr>
<vrfName></vrfName>
<datagramSize></datagramSize>
<port></port>
<description></description>
<meth></meth>
</collector>
</collectors>
<samplings>
<sampling>
<ifName>%s</ifName>
<collectorID></collectorID>
<direction></direction>
<length></length>
<rate></rate>
</sampling>
</samplings>
<counters>
<counter>
<ifName>%s</ifName>
<collectorID></collectorID>
<interval></interval>
</counter>
</counters>
<exports>
<export>
<ExportRoute></ExportRoute>
</export>
</exports>
</sflow>
</filter>
"""
def is_config_exist(cmp_cfg, test_cfg):
"""is configuration exist?"""
if not cmp_cfg or not test_cfg:
return False
return bool(test_cfg in cmp_cfg)
def is_valid_ip_vpn(vpname):
"""check ip vpn"""
if not vpname:
return False
if vpname == "_public_":
return False
if len(vpname) < 1 or len(vpname) > 31:
return False
return True
def get_ip_version(address):
"""get ip version fast"""
if not address:
return None
if address.count(':') >= 2 and address.count(":") <= 7:
return "ipv6"
elif address.count('.') == 3:
return "ipv4"
else:
return None
def get_interface_type(interface):
"""get the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
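# Illustrative examples (derived from the prefix checks above):
#   get_interface_type("10GE2/0/2")   -> "10ge"
#   get_interface_type("Eth-Trunk20") -> "eth-trunk"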
class Sflow(object):
"""Manages sFlow"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.__init_module__()
# module input info
self.agent_ip = self.module.params['agent_ip']
self.agent_version = None
self.source_ip = self.module.params['source_ip']
self.source_version = None
self.export_route = self.module.params['export_route']
self.rate_limit = self.module.params['rate_limit']
self.rate_limit_slot = self.module.params['rate_limit_slot']
self.forward_enp_slot = self.module.params['forward_enp_slot']
self.collector_id = self.module.params['collector_id']
self.collector_ip = self.module.params['collector_ip']
self.collector_version = None
self.collector_ip_vpn = self.module.params['collector_ip_vpn']
self.collector_datagram_size = self.module.params['collector_datagram_size']
self.collector_udp_port = self.module.params['collector_udp_port']
self.collector_meth = self.module.params['collector_meth']
self.collector_description = self.module.params['collector_description']
self.sflow_interface = self.module.params['sflow_interface']
self.sample_collector = self.module.params['sample_collector'] or list()
self.sample_rate = self.module.params['sample_rate']
self.sample_length = self.module.params['sample_length']
self.sample_direction = self.module.params['sample_direction']
self.counter_collector = self.module.params['counter_collector'] or list()
self.counter_interval = self.module.params['counter_interval']
self.state = self.module.params['state']
# state
self.config = "" # current config
self.sflow_dict = dict()
self.changed = False
self.updates_cmd = list()
self.commands = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def __init_module__(self):
"""init module"""
required_together = [("collector_id", "collector_ip")]
self.module = AnsibleModule(
argument_spec=self.spec, required_together=required_together, supports_check_mode=True)
def check_response(self, con_obj, xml_name):
"""Check if response message is already succeed"""
xml_str = con_obj.xml
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def netconf_set_config(self, xml_str, xml_name):
"""netconf set config"""
rcv_xml = set_nc_config(self.module, xml_str)
if "<ok/>" not in rcv_xml:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_sflow_dict(self):
""" sflow config dict"""
sflow_dict = dict(source=list(), agent=dict(), collector=list(),
sampling=dict(), counter=dict(), export=dict())
conf_str = CE_NC_GET_SFLOW % (
self.sflow_interface, self.sflow_interface)
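        # leave the <meth> node out of the NETCONF query when collector_meth is not requested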
if not self.collector_meth:
conf_str = conf_str.replace("<meth></meth>", "")
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return sflow_dict
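        # strip line breaks and XML namespaces so the ElementTree lookups below can use plain tag paths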
xml_str = rcv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
# get source info
srcs = root.findall("sflow/sources/source")
if srcs:
for src in srcs:
attrs = dict()
for attr in src:
if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
attrs[attr.tag] = attr.text
sflow_dict["source"].append(attrs)
# get agent info
agent = root.find("sflow/agents/agent")
if agent:
for attr in agent:
if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]:
sflow_dict["agent"][attr.tag] = attr.text
# get collector info
collectors = root.findall("sflow/collectors/collector")
if collectors:
for collector in collectors:
attrs = dict()
for attr in collector:
if attr.tag in ["collectorID", "family", "ipv4Addr", "ipv6Addr",
"vrfName", "datagramSize", "port", "description", "meth"]:
attrs[attr.tag] = attr.text
sflow_dict["collector"].append(attrs)
# get sampling info
sample = root.find("sflow/samplings/sampling")
if sample:
for attr in sample:
if attr.tag in ["ifName", "collectorID", "direction", "length", "rate"]:
sflow_dict["sampling"][attr.tag] = attr.text
# get counter info
counter = root.find("sflow/counters/counter")
if counter:
for attr in counter:
if attr.tag in ["ifName", "collectorID", "interval"]:
sflow_dict["counter"][attr.tag] = attr.text
# get export info
export = root.find("sflow/exports/export")
if export:
for attr in export:
if attr.tag == "ExportRoute":
sflow_dict["export"][attr.tag] = attr.text
return sflow_dict
def config_agent(self):
"""configures sFlow agent"""
xml_str = ''
if not self.agent_ip:
return xml_str
self.agent_version = get_ip_version(self.agent_ip)
if not self.agent_version:
self.module.fail_json(msg="Error: agent_ip is invalid.")
if self.state == "present":
if self.agent_ip != self.sflow_dict["agent"].get("ipv4Addr") \
and self.agent_ip != self.sflow_dict["agent"].get("ipv6Addr"):
xml_str += '<agents><agent operation="merge">'
xml_str += '<family>%s</family>' % self.agent_version
if self.agent_version == "ipv4":
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.agent_ip
self.updates_cmd.append("sflow agent ip %s" % self.agent_ip)
else:
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.agent_ip
self.updates_cmd.append("sflow agent ipv6 %s" % self.agent_ip)
xml_str += '</agent></agents>'
else:
if self.agent_ip == self.sflow_dict["agent"].get("ipv4Addr") \
or self.agent_ip == self.sflow_dict["agent"].get("ipv6Addr"):
xml_str += '<agents><agent operation="delete"></agent></agents>'
self.updates_cmd.append("undo sflow agent")
return xml_str
def config_source(self):
"""configures the source IP address for sFlow packets"""
xml_str = ''
if not self.source_ip:
return xml_str
self.source_version = get_ip_version(self.source_ip)
if not self.source_version:
self.module.fail_json(msg="Error: source_ip is invalid.")
src_dict = dict()
for src in self.sflow_dict["source"]:
if src.get("family") == self.source_version:
src_dict = src
break
if self.state == "present":
if self.source_ip != src_dict.get("ipv4Addr") \
and self.source_ip != src_dict.get("ipv6Addr"):
xml_str += '<sources><source operation="merge">'
xml_str += '<family>%s</family>' % self.source_version
if self.source_version == "ipv4":
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.source_ip
self.updates_cmd.append("sflow source ip %s" % self.source_ip)
else:
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.source_ip
self.updates_cmd.append(
"sflow source ipv6 %s" % self.source_ip)
xml_str += '</source ></sources>'
else:
if self.source_ip == src_dict.get("ipv4Addr"):
xml_str += '<sources><source operation="delete"><family>ipv4</family></source ></sources>'
self.updates_cmd.append("undo sflow source ip %s" % self.source_ip)
elif self.source_ip == src_dict.get("ipv6Addr"):
xml_str += '<sources><source operation="delete"><family>ipv6</family></source ></sources>'
self.updates_cmd.append("undo sflow source ipv6 %s" % self.source_ip)
return xml_str
def config_collector(self):
"""creates an sFlow collector and sets or modifies optional parameters for the sFlow collector"""
xml_str = ''
if not self.collector_id:
return xml_str
if self.state == "present" and not self.collector_ip:
return xml_str
if self.collector_ip:
self.collector_version = get_ip_version(self.collector_ip)
if not self.collector_version:
self.module.fail_json(msg="Error: collector_ip is invalid.")
# get collector dict
exist_dict = dict()
for collector in self.sflow_dict["collector"]:
if collector.get("collectorID") == self.collector_id:
exist_dict = collector
break
change = False
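        # compare the requested collector attributes (including implicit defaults for vpn, port, size and meth)
        # with the existing entry; any mismatch means the collector must be reconfigured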
if self.state == "present":
if not exist_dict:
change = True
elif self.collector_version != exist_dict.get("family"):
change = True
elif self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
change = True
elif self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
change = True
elif self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
change = True
elif not self.collector_ip_vpn and exist_dict.get("vrfName") != "_public_":
change = True
elif self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
change = True
elif not self.collector_udp_port and exist_dict.get("port") != "6343":
change = True
elif self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
change = True
elif not self.collector_datagram_size and exist_dict.get("datagramSize") != "1400":
change = True
elif self.collector_meth and self.collector_meth != exist_dict.get("meth"):
change = True
elif not self.collector_meth and exist_dict.get("meth") and exist_dict.get("meth") != "meth":
change = True
elif self.collector_description and self.collector_description != exist_dict.get("description"):
change = True
elif not self.collector_description and exist_dict.get("description"):
change = True
else:
pass
else: # absent
# collector not exist
if not exist_dict:
return xml_str
if self.collector_version and self.collector_version != exist_dict.get("family"):
return xml_str
if self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"):
return xml_str
if self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"):
return xml_str
if self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"):
return xml_str
if self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"):
return xml_str
if self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"):
return xml_str
if self.collector_meth and self.collector_meth != exist_dict.get("meth"):
return xml_str
if self.collector_description and self.collector_description != exist_dict.get("description"):
return xml_str
change = True
if not change:
return xml_str
# update or delete
if self.state == "absent":
xml_str += '<collectors><collector operation="delete"><collectorID>%s</collectorID>' % self.collector_id
self.updates_cmd.append("undo collector %s" % self.collector_id)
else:
xml_str += '<collectors><collector operation="merge"><collectorID>%s</collectorID>' % self.collector_id
cmd = "sflow collector %s" % self.collector_id
xml_str += '<family>%s</family>' % self.collector_version
if self.collector_version == "ipv4":
cmd += " ip %s" % self.collector_ip
xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.collector_ip
else:
cmd += " ipv6 %s" % self.collector_ip
xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.collector_ip
if self.collector_ip_vpn:
cmd += " vpn-instance %s" % self.collector_ip_vpn
xml_str += '<vrfName>%s</vrfName>' % self.collector_ip_vpn
if self.collector_datagram_size:
cmd += " length %s" % self.collector_datagram_size
xml_str += '<datagramSize>%s</datagramSize>' % self.collector_datagram_size
if self.collector_udp_port:
cmd += " udp-port %s" % self.collector_udp_port
xml_str += '<port>%s</port>' % self.collector_udp_port
if self.collector_description:
cmd += " description %s" % self.collector_description
xml_str += '<description>%s</description>' % self.collector_description
else:
xml_str += '<description></description>'
if self.collector_meth:
if self.collector_meth == "enhanced":
cmd += " enhanced"
xml_str += '<meth>%s</meth>' % self.collector_meth
self.updates_cmd.append(cmd)
xml_str += "</collector></collectors>"
return xml_str
def config_sampling(self):
"""configure sflow sampling on an interface"""
xml_str = ''
if not self.sflow_interface:
return xml_str
if not self.sflow_dict["sampling"] and self.state == "absent":
return xml_str
self.updates_cmd.append("interface %s" % self.sflow_interface)
if self.state == "present":
xml_str += '<samplings><sampling operation="merge"><ifName>%s</ifName>' % self.sflow_interface
else:
xml_str += '<samplings><sampling operation="delete"><ifName>%s</ifName>' % self.sflow_interface
# sample_collector
if self.sample_collector:
if self.sflow_dict["sampling"].get("collectorID") \
and self.sflow_dict["sampling"].get("collectorID") != "invalid":
existing = self.sflow_dict["sampling"].get("collectorID").split(',')
else:
existing = list()
if self.state == "present":
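                # only newly requested collector IDs produce a CLI command; the XML carries the union of requested and existing IDs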
diff = list(set(self.sample_collector) - set(existing))
if diff:
self.updates_cmd.append(
"sflow sampling collector %s" % ' '.join(diff))
new_set = list(self.sample_collector + existing)
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
else:
same = list(set(self.sample_collector) & set(existing))
if same:
self.updates_cmd.append(
"undo sflow sampling collector %s" % ' '.join(same))
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
# sample_rate
if self.sample_rate:
exist = bool(self.sample_rate == self.sflow_dict["sampling"].get("rate"))
if self.state == "present" and not exist:
self.updates_cmd.append(
"sflow sampling rate %s" % self.sample_rate)
xml_str += '<rate>%s</rate>' % self.sample_rate
elif self.state == "absent" and exist:
self.updates_cmd.append(
"undo sflow sampling rate %s" % self.sample_rate)
xml_str += '<rate>%s</rate>' % self.sample_rate
# sample_length
if self.sample_length:
exist = bool(self.sample_length == self.sflow_dict["sampling"].get("length"))
if self.state == "present" and not exist:
self.updates_cmd.append(
"sflow sampling length %s" % self.sample_length)
xml_str += '<length>%s</length>' % self.sample_length
elif self.state == "absent" and exist:
self.updates_cmd.append(
"undo sflow sampling length %s" % self.sample_length)
xml_str += '<length>%s</length>' % self.sample_length
# sample_direction
if self.sample_direction:
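            # expand 'both' into inbound + outbound so the set comparisons below work uniformly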
direction = list()
if self.sample_direction == "both":
direction = ["inbound", "outbound"]
else:
direction.append(self.sample_direction)
existing = list()
if self.sflow_dict["sampling"].get("direction"):
if self.sflow_dict["sampling"].get("direction") == "both":
existing = ["inbound", "outbound"]
else:
existing.append(
self.sflow_dict["sampling"].get("direction"))
if self.state == "present":
diff = list(set(direction) - set(existing))
if diff:
new_set = list(set(direction + existing))
self.updates_cmd.append(
"sflow sampling %s" % ' '.join(diff))
if len(new_set) > 1:
new_dir = "both"
else:
new_dir = new_set[0]
xml_str += '<direction>%s</direction>' % new_dir
else:
same = list(set(existing) & set(direction))
if same:
self.updates_cmd.append("undo sflow sampling %s" % ' '.join(same))
if len(same) > 1:
del_dir = "both"
else:
del_dir = same[0]
xml_str += '<direction>%s</direction>' % del_dir
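        # nothing besides the interface name was added, so drop the queued 'interface ...' command and emit no sampling config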
if xml_str.endswith("</ifName>"):
self.updates_cmd.pop()
return ""
xml_str += '</sampling></samplings>'
return xml_str
def config_counter(self):
"""configures sflow counter on an interface"""
xml_str = ''
if not self.sflow_interface:
return xml_str
if not self.sflow_dict["counter"] and self.state == "absent":
return xml_str
self.updates_cmd.append("interface %s" % self.sflow_interface)
if self.state == "present":
xml_str += '<counters><counter operation="merge"><ifName>%s</ifName>' % self.sflow_interface
else:
xml_str += '<counters><counter operation="delete"><ifName>%s</ifName>' % self.sflow_interface
# counter_collector
if self.counter_collector:
if self.sflow_dict["counter"].get("collectorID") \
and self.sflow_dict["counter"].get("collectorID") != "invalid":
existing = self.sflow_dict["counter"].get("collectorID").split(',')
else:
existing = list()
if self.state == "present":
diff = list(set(self.counter_collector) - set(existing))
if diff:
self.updates_cmd.append("sflow counter collector %s" % ' '.join(diff))
new_set = list(self.counter_collector + existing)
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set)))
else:
same = list(set(self.counter_collector) & set(existing))
if same:
self.updates_cmd.append(
"undo sflow counter collector %s" % ' '.join(same))
xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same)))
# counter_interval
if self.counter_interval:
exist = bool(self.counter_interval == self.sflow_dict["counter"].get("interval"))
if self.state == "present" and not exist:
self.updates_cmd.append(
"sflow counter interval %s" % self.counter_interval)
xml_str += '<interval>%s</interval>' % self.counter_interval
elif self.state == "absent" and exist:
self.updates_cmd.append(
"undo sflow counter interval %s" % self.counter_interval)
xml_str += '<interval>%s</interval>' % self.counter_interval
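        # nothing besides the interface name was added, so drop the queued 'interface ...' command and emit no counter config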
if xml_str.endswith("</ifName>"):
self.updates_cmd.pop()
return ""
xml_str += '</counter></counters>'
return xml_str
def config_export(self):
"""configure sflow export"""
xml_str = ''
if not self.export_route:
return xml_str
if self.export_route == "enable":
if self.sflow_dict["export"] and self.sflow_dict["export"].get("ExportRoute") == "disable":
xml_str = '<exports><export operation="delete"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("undo sflow export extended-route-data disable")
else: # disable
if not self.sflow_dict["export"] or self.sflow_dict["export"].get("ExportRoute") != "disable":
xml_str = '<exports><export operation="create"><ExportRoute>disable</ExportRoute></export></exports>'
self.updates_cmd.append("sflow export extended-route-data disable")
return xml_str
def netconf_load_config(self, xml_str):
"""load sflow config by netconf"""
if not xml_str:
return
xml_cfg = """
<config>
<sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
%s
</sflow>
</config>""" % xml_str
self.netconf_set_config(xml_cfg, "SET_SFLOW")
self.changed = True
def check_params(self):
"""Check all input params"""
# check agent_ip
if self.agent_ip:
self.agent_ip = self.agent_ip.upper()
if not check_ip_addr(self.agent_ip):
self.module.fail_json(msg="Error: agent_ip is invalid.")
# check source_ip
if self.source_ip:
self.source_ip = self.source_ip.upper()
if not check_ip_addr(self.source_ip):
self.module.fail_json(msg="Error: source_ip is invalid.")
# check collector
if self.collector_id:
# check collector_ip and collector_ip_vpn
if self.collector_ip:
self.collector_ip = self.collector_ip.upper()
if not check_ip_addr(self.collector_ip):
self.module.fail_json(
msg="Error: collector_ip is invalid.")
if self.collector_ip_vpn and not is_valid_ip_vpn(self.collector_ip_vpn):
self.module.fail_json(
msg="Error: collector_ip_vpn is invalid.")
# check collector_datagram_size ranges from 1024 to 8100
if self.collector_datagram_size:
if not self.collector_datagram_size.isdigit():
self.module.fail_json(
msg="Error: collector_datagram_size is not digit.")
if int(self.collector_datagram_size) < 1024 or int(self.collector_datagram_size) > 8100:
self.module.fail_json(
msg="Error: collector_datagram_size is not ranges from 1024 to 8100.")
# check collector_udp_port ranges from 1 to 65535
if self.collector_udp_port:
if not self.collector_udp_port.isdigit():
self.module.fail_json(
msg="Error: collector_udp_port is not digit.")
if int(self.collector_udp_port) < 1 or int(self.collector_udp_port) > 65535:
self.module.fail_json(
msg="Error: collector_udp_port is not ranges from 1 to 65535.")
# check collector_description 1 to 255 case-sensitive characters
if self.collector_description:
if self.collector_description.count(" "):
self.module.fail_json(
                    msg="Error: collector_description must not contain spaces.")
if len(self.collector_description) < 1 or len(self.collector_description) > 255:
self.module.fail_json(
msg="Error: collector_description is not ranges from 1 to 255.")
# check sflow_interface
if self.sflow_interface:
intf_type = get_interface_type(self.sflow_interface)
if not intf_type:
self.module.fail_json(msg="Error: intf_type is invalid.")
if intf_type not in ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'eth-trunk']:
self.module.fail_json(
msg="Error: interface %s is not support sFlow." % self.sflow_interface)
# check sample_collector
if self.sample_collector:
self.sample_collector.sort()
if self.sample_collector not in [["1"], ["2"], ["1", "2"]]:
self.module.fail_json(
msg="Error: sample_collector is invalid.")
# check sample_rate ranges from 1 to 4294967295
if self.sample_rate:
if not self.sample_rate.isdigit():
self.module.fail_json(
msg="Error: sample_rate is not digit.")
if int(self.sample_rate) < 1 or int(self.sample_rate) > 4294967295:
self.module.fail_json(
msg="Error: sample_rate is not ranges from 1 to 4294967295.")
# check sample_length ranges from 18 to 512
if self.sample_length:
if not self.sample_length.isdigit():
self.module.fail_json(
                    msg="Error: sample_length is not digit.")
if int(self.sample_length) < 18 or int(self.sample_length) > 512:
self.module.fail_json(
msg="Error: sample_length is not ranges from 18 to 512.")
# check counter_collector
if self.counter_collector:
self.counter_collector.sort()
if self.counter_collector not in [["1"], ["2"], ["1", "2"]]:
self.module.fail_json(
msg="Error: counter_collector is invalid.")
# counter_interval ranges from 10 to 4294967295
if self.counter_interval:
if not self.counter_interval.isdigit():
self.module.fail_json(
msg="Error: counter_interval is not digit.")
if int(self.counter_interval) < 10 or int(self.counter_interval) > 4294967295:
self.module.fail_json(
                    msg="Error: counter_interval is not ranges from 10 to 4294967295.")
if self.rate_limit or self.rate_limit_slot or self.forward_enp_slot:
            self.module.fail_json(msg="Error: The following parameters cannot be configured "
                                      "because XML mode is not supported: rate_limit, rate_limit_slot, forward_enp_slot.")
def get_proposed(self):
"""get proposed info"""
# base config
if self.agent_ip:
self.proposed["agent_ip"] = self.agent_ip
if self.source_ip:
self.proposed["source_ip"] = self.source_ip
if self.export_route:
self.proposed["export_route"] = self.export_route
if self.rate_limit:
self.proposed["rate_limit"] = self.rate_limit
self.proposed["rate_limit_slot"] = self.rate_limit_slot
if self.forward_enp_slot:
self.proposed["forward_enp_slot"] = self.forward_enp_slot
if self.collector_id:
self.proposed["collector_id"] = self.collector_id
if self.collector_ip:
self.proposed["collector_ip"] = self.collector_ip
self.proposed["collector_ip_vpn"] = self.collector_ip_vpn
if self.collector_datagram_size:
self.proposed[
"collector_datagram_size"] = self.collector_datagram_size
if self.collector_udp_port:
self.proposed["collector_udp_port"] = self.collector_udp_port
if self.collector_meth:
self.proposed["collector_meth"] = self.collector_meth
if self.collector_description:
self.proposed[
"collector_description"] = self.collector_description
# sample and counter config
if self.sflow_interface:
self.proposed["sflow_interface"] = self.sflow_interface
if self.sample_collector:
self.proposed["sample_collector"] = self.sample_collector
if self.sample_rate:
self.proposed["sample_rate"] = self.sample_rate
if self.sample_length:
self.proposed["sample_length"] = self.sample_length
if self.sample_direction:
self.proposed["sample_direction"] = self.sample_direction
if self.counter_collector:
self.proposed["counter_collector"] = self.counter_collector
if self.counter_interval:
self.proposed["counter_interval"] = self.counter_interval
self.proposed["state"] = self.state
def get_existing(self):
"""get existing info"""
if not self.sflow_dict:
return
if self.agent_ip:
self.existing["agent"] = self.sflow_dict["agent"]
if self.source_ip:
self.existing["source"] = self.sflow_dict["source"]
if self.collector_id:
self.existing["collector"] = self.sflow_dict["collector"]
if self.export_route:
self.existing["export"] = self.sflow_dict["export"]
if self.sflow_interface:
self.existing["sampling"] = self.sflow_dict["sampling"]
self.existing["counter"] = self.sflow_dict["counter"]
def get_end_state(self):
"""get end state info"""
sflow_dict = self.get_sflow_dict()
if not sflow_dict:
return
if self.agent_ip:
self.end_state["agent"] = sflow_dict["agent"]
if self.source_ip:
self.end_state["source"] = sflow_dict["source"]
if self.collector_id:
self.end_state["collector"] = sflow_dict["collector"]
if self.export_route:
self.end_state["export"] = sflow_dict["export"]
if self.sflow_interface:
self.end_state["sampling"] = sflow_dict["sampling"]
self.end_state["counter"] = sflow_dict["counter"]
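        # report no change when the device configuration is identical before and after the edit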
if self.existing == self.end_state:
self.changed = False
def work(self):
"""worker"""
self.check_params()
self.sflow_dict = self.get_sflow_dict()
self.get_existing()
self.get_proposed()
# deal present or absent
xml_str = ''
if self.export_route:
xml_str += self.config_export()
if self.agent_ip:
xml_str += self.config_agent()
if self.source_ip:
xml_str += self.config_source()
if self.state == "present":
if self.collector_id and self.collector_ip:
xml_str += self.config_collector()
if self.sflow_interface:
xml_str += self.config_sampling()
xml_str += self.config_counter()
else:
if self.sflow_interface:
xml_str += self.config_sampling()
xml_str += self.config_counter()
if self.collector_id:
xml_str += self.config_collector()
if xml_str:
self.netconf_load_config(xml_str)
self.changed = True
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
agent_ip=dict(required=False, type='str'),
source_ip=dict(required=False, type='str'),
export_route=dict(required=False, type='str',
choices=['enable', 'disable']),
rate_limit=dict(required=False, removed_in_version=2.13, type='str'),
rate_limit_slot=dict(required=False, removed_in_version=2.13, type='str'),
forward_enp_slot=dict(required=False, removed_in_version=2.13, type='str'),
collector_id=dict(required=False, type='str', choices=['1', '2']),
collector_ip=dict(required=False, type='str'),
collector_ip_vpn=dict(required=False, type='str'),
collector_datagram_size=dict(required=False, type='str'),
collector_udp_port=dict(required=False, type='str'),
collector_meth=dict(required=False, type='str',
choices=['meth', 'enhanced']),
collector_description=dict(required=False, type='str'),
sflow_interface=dict(required=False, type='str'),
sample_collector=dict(required=False, type='list'),
sample_rate=dict(required=False, type='str'),
sample_length=dict(required=False, type='str'),
sample_direction=dict(required=False, type='str',
choices=['inbound', 'outbound', 'both']),
counter_collector=dict(required=False, type='list'),
counter_interval=dict(required=False, type='str'),
state=dict(required=False, default='present',
choices=['present', 'absent'])
)
argument_spec.update(ce_argument_spec)
module = Sflow(argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,787,498,711,970,452,700 | 39.431507 | 119 | 0.562765 | false |
saifulazad/myApp | app/views.py | 1 | 7289 | from datetime import datetime
from flask import render_template, session, request, redirect
from flask.helpers import url_for
from flask_login import login_user, logout_user, login_required
from DropDown import *
from ExamQuestions import *
from UploadQuestion import *
from app import app, login_manager
from app.PhotoHandler import Photo
from app.ResultProcessor import ResultProcessor
from profile import *
@login_manager.user_loader
def load_user(id):
return Registertable.query.get(int(id))
# @csrf.error_handler
# def csrf_error(reason):
# print 'OOOOOOOOOOOOO'
# return render_template('csrf_error.html', reason=reason), 400
@login_manager.unauthorized_handler
def unauthorized():
return redirect(url_for('login'))
@app.route('/profile', methods=['GET'])
@login_required
def profile():
# form = UserProfile(request.form)
# generating random numbers
solved = random.sample(range(1, 100), 13)
tried = random.sample(range(1, 100), 23)
user = User_profile(session['user_id'])
return render_template('profile.html', user=user, solved=solved, tried=tried)
@app.route('/upload', methods=['GET', 'POST'])
@login_required
def upload():
form = QuestionForm(request.form)
category = CategorieDropdown()
correctanswer = CorrctAnswerDropdown()
if request.method == 'POST' and form.validate_on_submit():
print request.method
# questionID = db.Column(db.Integer,primary_key=True)
# description = db.Column(db.String(300))
# categoryID = db.Column(db.Integer, db.ForeignKey('category.ID'))
# userID = db.Column(db.Integer, db.ForeignKey('registertable.ID'))
# option1 = db.Column(db.String(100))
# option2 = db.Column(db.String(100))
# option3 = db.Column(db.String(100))
# option4 = db.Column(db.String(100))
# correctAnswer = db.Column(db.Integer)
# hint = db.Column(db.String(300))
# solvedUser = db.Column(db.Integer)
# failedUser = db.Column(db.Integer)
# imgURL = db.Column(db.String(300))
# dateUploaded = db.Column(db.DateTime)
print request.form[category.select_liertal]
question = Question(description=form.description.data,
option1=form.option1.data, option2=form.option2.data,
option3=form.option3.data, option4=form.option4.data,
correctAnswer=request.form[correctanswer.select_liertal],
categoryID=request.form[category.select_liertal],
userID=session['user_id'], hint="None", solvedUser=0,
failedUser=0, dateUploaded=datetime.now(),
imgURL='/')
db.session.add(question)
db.session.commit()
return redirect(url_for('upload'))
return render_template('upload.html', form=form, categories=category, correctanswer=correctanswer)
@app.route('/submit', methods=['GET', 'POST'])
@login_required
def Next():
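    # record the answer for the current question (0 when no option was picked),
    # then show the next question or the results page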
user_value = request.form.getlist('option1')
if len(user_value):
pair = (request.form['questionID'], user_value[0])
else:
pair = (request.form['questionID'], 0)
try:
val = session['tmpquestionsID'].pop(0)
print session['tmpquestionsID']
session['useranswer'].append(pair)
return redirect(url_for('QuestionID', id=val))
except:
session['useranswer'].append(pair)
print session['useranswer']
return redirect(url_for('Result'))
# else:
#
@app.route('/result', methods=['GET'])
@login_required
def Result():
ob = ResultProcessor(session['user_id'])
result = ob.get_user_answer(session['useranswer'])
session.pop('useranswer', None)
session.pop('tmpquestionsID', None)
session.pop('questionsID', None)
return render_template("result.html", result=result)
@app.route('/category', methods=['GET'])
@login_required
def category():
english_categories = CategorieDropdown()
print english_categories.select_liertal
print english_categories.pair
return render_template('category.html', english_categories=english_categories)
@app.route('/exam', methods=['GET', 'POST'])
@login_required
def Exam():
english_categories = CategorieDropdown()
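    # keep the generated question IDs and an empty answer list in the session;
    # Next() pops IDs from tmpquestionsID as the exam progresses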
session['questionsID'] = ExamQuestions().get_all()
session['tmpquestionsID'] = session['questionsID']
session['useranswer'] = []
return redirect(url_for('QuestionID', id=session['tmpquestionsID'].pop(0)))
@app.route('/', methods=['GET', 'POST'])
def login():
form = Login()
if request.method == 'POST' and form.validate_on_submit():
login_user(form.user)
print form.user.email
return redirect(url_for('upload'))
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
#
@app.route('/questions/<id>', methods=['GET'])
def QuestionID(id):
question = Question.query.filter_by(questionID=id).first()
# root_path = str(request.path).rsplit('/',1)[0]
#
# user_value = request.form.getlist('option1')
# if( len(user_value)):
# print user_value[0]
# print question.correctAnswer
# ln = len(Questiontable.query.all())
# path = '/'.join([root_path,str(int(id)%ln+1)])
#
# print path
# str(1)
return render_template('question.html', question=question)
# route to register
@app.route('/reg', methods=['GET', 'POST'])
def Register():
# caps = Captcha.query.all()
#
#
# cap_img = [caps[random.randrange(0,2)] for x in range(3)]
# d = []
# for x in cap_img:
# d.append ((x.ID , x.src))
# cap_img = d
# return render_template("captcha.html",cap_img=d)
schools = SchoolDropdown()
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
file = request.files['file']
photo = Photo(file)
url = photo.save(form.User_Id.data)
user = Registertable(name=form.Name.data, email=form.Email.data,
userID=form.User_Id.data, schoolID=request.form[schools.select_liertal],
password=form.Password.data, gender=form.gender.data, imgURL=url,
unsolved=0, solved=0)
db.session.add(user)
db.session.commit()
        return redirect(url_for("login"))
# print schools.select_liertal
return render_template('reg.html', form=form, schools=schools)
# from werkzeug import secure_filename
# from flask_wtf.file import FileField
# @app.route('/up', methods=['GET', 'POST'])
# def upload_file():
# WTF_CSRF_ENABLED = False
# if request.method == 'POST':
# file = request.files['file']
# print file
# if file and allowed_file(file.filename):
# filename = secure_filename(file.filename)
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# return redirect(url_for('uploaded_file',
# filename=filename))
# return render_template('up.html')
| bsd-3-clause | 8,507,540,077,735,026,000 | 31.540179 | 102 | 0.625051 | false |
kinverarity1/las-reader | docs/source/conf.py | 1 | 1636 | import sys
import os
import shlex
sys.path.insert(0, os.path.abspath("."))
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"IPython.sphinxext.ipython_console_highlighting",
]
templates_path = ["_templates"]
autodoc_member_order = "bysource"
source_suffix = ".rst"
master_doc = "index"
project = u"lasio"
copyright = u"2013-2020, Kent Inverarity and contributors"
author = u"Kent Inverarity and contributors"
from pkg_resources import get_distribution
release = get_distribution("lasio").version
version = ".".join(release.split(".")[:2])
language = None
exclude_patterns = []
pygments_style = "default"
todo_include_todos = True
html_theme = "sphinx_rtd_theme"
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
html_theme_options = {}
htmlhelp_basename = "lasiodoc"
latex_elements = {}
latex_documents = [
(
master_doc,
"lasio.tex",
u"lasio Documentation",
u"Kent Inverarity and contributors",
"manual",
)
]
man_pages = [(master_doc, "lasio", u"lasio Documentation", [author], 1)]
texinfo_documents = [
(
master_doc,
"lasio",
u"lasio Documentation",
author,
"lasio",
"One line description of project.",
"Miscellaneous",
)
]
intersphinx_mapping = {"https://docs.python.org/": None}
| mit | 1,104,639,226,589,506,400 | 18.023256 | 72 | 0.618582 | false |
johnraz/django-rest-framework | tests/test_validation.py | 3 | 8376 | from __future__ import unicode_literals
import re
from django.core.validators import MaxValueValidator, RegexValidator
from django.db import models
from django.test import TestCase
from rest_framework import generics, serializers, status
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
# Regression for #666
class ValidationModel(models.Model):
blank_validated_field = models.CharField(max_length=255)
class ValidationModelSerializer(serializers.ModelSerializer):
class Meta:
model = ValidationModel
fields = ('blank_validated_field',)
read_only_fields = ('blank_validated_field',)
class UpdateValidationModel(generics.RetrieveUpdateDestroyAPIView):
queryset = ValidationModel.objects.all()
serializer_class = ValidationModelSerializer
# Regression for #653
class ShouldValidateModel(models.Model):
should_validate_field = models.CharField(max_length=255)
class ShouldValidateModelSerializer(serializers.ModelSerializer):
renamed = serializers.CharField(source='should_validate_field', required=False)
def validate_renamed(self, value):
if len(value) < 3:
raise serializers.ValidationError('Minimum 3 characters.')
return value
class Meta:
model = ShouldValidateModel
fields = ('renamed',)
class TestNestedValidationError(TestCase):
def test_nested_validation_error_detail(self):
"""
Ensure nested validation error detail is rendered correctly.
"""
e = serializers.ValidationError({
'nested': {
'field': ['error'],
}
})
self.assertEqual(serializers.get_validation_error_detail(e), {
'nested': {
'field': ['error'],
}
})
class TestPreSaveValidationExclusionsSerializer(TestCase):
def test_renamed_fields_are_model_validated(self):
"""
Ensure fields with 'source' applied do get still get model validation.
"""
# We've set `required=False` on the serializer, but the model
# does not have `blank=True`, so this serializer should not validate.
serializer = ShouldValidateModelSerializer(data={'renamed': ''})
self.assertEqual(serializer.is_valid(), False)
self.assertIn('renamed', serializer.errors)
self.assertNotIn('should_validate_field', serializer.errors)
class TestCustomValidationMethods(TestCase):
def test_custom_validation_method_is_executed(self):
serializer = ShouldValidateModelSerializer(data={'renamed': 'fo'})
self.assertFalse(serializer.is_valid())
self.assertIn('renamed', serializer.errors)
def test_custom_validation_method_passing(self):
serializer = ShouldValidateModelSerializer(data={'renamed': 'foo'})
self.assertTrue(serializer.is_valid())
class ValidationSerializer(serializers.Serializer):
foo = serializers.CharField()
def validate_foo(self, attrs, source):
raise serializers.ValidationError("foo invalid")
def validate(self, attrs):
raise serializers.ValidationError("serializer invalid")
class TestAvoidValidation(TestCase):
"""
If serializer was initialized with invalid data (None or non dict-like), it
should avoid validation layer (validate_<field> and validate methods)
"""
def test_serializer_errors_has_only_invalid_data_error(self):
serializer = ValidationSerializer(data='invalid data')
self.assertFalse(serializer.is_valid())
self.assertDictEqual(serializer.errors, {
'non_field_errors': [
'Invalid data. Expected a dictionary, but got %s.' % type('').__name__
]
})
# regression tests for issue: 1493
class ValidationMaxValueValidatorModel(models.Model):
number_value = models.PositiveIntegerField(validators=[MaxValueValidator(100)])
class ValidationMaxValueValidatorModelSerializer(serializers.ModelSerializer):
class Meta:
model = ValidationMaxValueValidatorModel
fields = '__all__'
class UpdateMaxValueValidationModel(generics.RetrieveUpdateDestroyAPIView):
queryset = ValidationMaxValueValidatorModel.objects.all()
serializer_class = ValidationMaxValueValidatorModelSerializer
class TestMaxValueValidatorValidation(TestCase):
def test_max_value_validation_serializer_success(self):
serializer = ValidationMaxValueValidatorModelSerializer(data={'number_value': 99})
self.assertTrue(serializer.is_valid())
def test_max_value_validation_serializer_fails(self):
serializer = ValidationMaxValueValidatorModelSerializer(data={'number_value': 101})
self.assertFalse(serializer.is_valid())
self.assertDictEqual({'number_value': ['Ensure this value is less than or equal to 100.']}, serializer.errors)
def test_max_value_validation_success(self):
obj = ValidationMaxValueValidatorModel.objects.create(number_value=100)
request = factory.patch('/{0}'.format(obj.pk), {'number_value': 98}, format='json')
view = UpdateMaxValueValidationModel().as_view()
response = view(request, pk=obj.pk).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_max_value_validation_fail(self):
obj = ValidationMaxValueValidatorModel.objects.create(number_value=100)
request = factory.patch('/{0}'.format(obj.pk), {'number_value': 101}, format='json')
view = UpdateMaxValueValidationModel().as_view()
response = view(request, pk=obj.pk).render()
self.assertEqual(response.content, b'{"number_value":["Ensure this value is less than or equal to 100."]}')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# regression tests for issue: 1533
class TestChoiceFieldChoicesValidate(TestCase):
CHOICES = [
(0, 'Small'),
(1, 'Medium'),
(2, 'Large'),
]
SINGLE_CHOICES = [0, 1, 2]
CHOICES_NESTED = [
('Category', (
(1, 'First'),
(2, 'Second'),
(3, 'Third'),
)),
(4, 'Fourth'),
]
MIXED_CHOICES = [
('Category', (
(1, 'First'),
(2, 'Second'),
)),
3,
(4, 'Fourth'),
]
def test_choices(self):
"""
Make sure a value for choices works as expected.
"""
f = serializers.ChoiceField(choices=self.CHOICES)
value = self.CHOICES[0][0]
try:
f.to_internal_value(value)
except serializers.ValidationError:
self.fail("Value %s does not validate" % str(value))
def test_single_choices(self):
"""
Make sure a single value for choices works as expected.
"""
f = serializers.ChoiceField(choices=self.SINGLE_CHOICES)
value = self.SINGLE_CHOICES[0]
try:
f.to_internal_value(value)
except serializers.ValidationError:
self.fail("Value %s does not validate" % str(value))
def test_nested_choices(self):
"""
Make sure a nested value for choices works as expected.
"""
f = serializers.ChoiceField(choices=self.CHOICES_NESTED)
value = self.CHOICES_NESTED[0][1][0][0]
try:
f.to_internal_value(value)
except serializers.ValidationError:
self.fail("Value %s does not validate" % str(value))
def test_mixed_choices(self):
"""
Make sure mixed values for choices works as expected.
"""
f = serializers.ChoiceField(choices=self.MIXED_CHOICES)
value = self.MIXED_CHOICES[1]
try:
f.to_internal_value(value)
except serializers.ValidationError:
self.fail("Value %s does not validate" % str(value))
class RegexSerializer(serializers.Serializer):
pin = serializers.CharField(
validators=[RegexValidator(regex=re.compile('^[0-9]{4,6}$'),
message='A PIN is 4-6 digits')])
expected_repr = """
RegexSerializer():
pin = CharField(validators=[<django.core.validators.RegexValidator object>])
""".strip()
class TestRegexSerializer(TestCase):
def test_regex_repr(self):
serializer_repr = repr(RegexSerializer())
assert serializer_repr == expected_repr
| bsd-2-clause | -5,959,678,454,141,171,000 | 32.370518 | 118 | 0.657116 | false |
googleapis/gapic-generator-python | tests/integration/goldens/credentials/google/iam/credentials_v1/services/iam_credentials/transports/__init__.py | 2 | 1194 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import IAMCredentialsTransport
from .grpc import IAMCredentialsGrpcTransport
from .grpc_asyncio import IAMCredentialsGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[IAMCredentialsTransport]]
_transport_registry['grpc'] = IAMCredentialsGrpcTransport
_transport_registry['grpc_asyncio'] = IAMCredentialsGrpcAsyncIOTransport
__all__ = (
'IAMCredentialsTransport',
'IAMCredentialsGrpcTransport',
'IAMCredentialsGrpcAsyncIOTransport',
)
| apache-2.0 | -88,436,349,026,459,100 | 35.181818 | 85 | 0.777219 | false |
NeCTAR-RC/ceilometer | ceilometer/tests/alarm/evaluator/test_base.py | 1 | 5673 | # -*- encoding: utf-8 -*-
#
# Copyright © 2013 IBM Corp
#
# Author: Tong Li <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""class for tests in ceilometer/alarm/evaluator/__init__.py
"""
import datetime
import mock
import pytz
from ceilometer.alarm import evaluator
from ceilometer.openstack.common import test
from ceilometer.openstack.common import timeutils
class TestEvaluatorBaseClass(test.BaseTestCase):
def setUp(self):
super(TestEvaluatorBaseClass, self).setUp()
self.called = False
def _notify(self, alarm, previous, reason, details):
self.called = True
raise Exception('Boom!')
def test_base_refresh(self):
notifier = mock.MagicMock()
notifier.notify = self._notify
class EvaluatorSub(evaluator.Evaluator):
def evaluate(self, alarm):
pass
ev = EvaluatorSub(notifier)
ev.api_client = mock.MagicMock()
ev._refresh(mock.MagicMock(), mock.MagicMock(),
mock.MagicMock(), mock.MagicMock())
self.assertTrue(self.called)
def test_base_time_constraints(self):
alarm = mock.MagicMock()
alarm.time_constraints = [
{'name': 'test',
'description': 'test',
'start': '0 11 * * *', # daily at 11:00
'duration': 10800, # 3 hours
'timezone': ''},
{'name': 'test2',
'description': 'test',
'start': '0 23 * * *', # daily at 23:00
'duration': 10800, # 3 hours
'timezone': ''},
]
cls = evaluator.Evaluator
timeutils.set_time_override(datetime.datetime(2014, 1, 1, 12, 0, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 2, 1, 0, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 2, 5, 0, 0))
self.assertFalse(cls.within_time_constraint(alarm))
def test_base_time_constraints_complex(self):
alarm = mock.MagicMock()
alarm.time_constraints = [
{'name': 'test',
'description': 'test',
# Every consecutive 2 minutes (from the 3rd to the 57th) past
# every consecutive 2 hours (between 3:00 and 12:59) on every day.
'start': '3-57/2 3-12/2 * * *',
'duration': 30,
'timezone': ''}
]
cls = evaluator.Evaluator
# test minutes inside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 3, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 57, 0))
self.assertTrue(cls.within_time_constraint(alarm))
# test minutes outside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 2, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 4, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 58, 0))
self.assertFalse(cls.within_time_constraint(alarm))
# test hours inside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 3, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 5, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 11, 31, 0))
self.assertTrue(cls.within_time_constraint(alarm))
# test hours outside
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 1, 31, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 4, 31, 0))
self.assertFalse(cls.within_time_constraint(alarm))
timeutils.set_time_override(datetime.datetime(2014, 1, 5, 12, 31, 0))
self.assertFalse(cls.within_time_constraint(alarm))
def test_base_time_constraints_timezone(self):
alarm = mock.MagicMock()
alarm.time_constraints = [
{'name': 'test',
'description': 'test',
'start': '0 11 * * *', # daily at 11:00
'duration': 10800, # 3 hours
'timezone': 'Europe/Ljubljana'}
]
cls = evaluator.Evaluator
dt_eu = datetime.datetime(2014, 1, 1, 12, 0, 0,
tzinfo=pytz.timezone('Europe/Ljubljana'))
dt_us = datetime.datetime(2014, 1, 1, 12, 0, 0,
tzinfo=pytz.timezone('US/Eastern'))
timeutils.set_time_override(dt_eu.astimezone(pytz.UTC))
self.assertTrue(cls.within_time_constraint(alarm))
timeutils.set_time_override(dt_us.astimezone(pytz.UTC))
self.assertFalse(cls.within_time_constraint(alarm))
| apache-2.0 | 1,308,871,319,242,010,000 | 39.805755 | 79 | 0.618124 | false |